diff --git a/stp/eng/tpd/1_introduction.md b/.archive/tpd-backup-20250717/1_introduction.md similarity index 100% rename from stp/eng/tpd/1_introduction.md rename to .archive/tpd-backup-20250717/1_introduction.md diff --git a/stp/eng/tpd/2_requirements.md b/.archive/tpd-backup-20250717/2_requirements.md similarity index 100% rename from stp/eng/tpd/2_requirements.md rename to .archive/tpd-backup-20250717/2_requirements.md diff --git a/stp/eng/tpd/3_architecture.md b/.archive/tpd-backup-20250717/3_architecture.md similarity index 100% rename from stp/eng/tpd/3_architecture.md rename to .archive/tpd-backup-20250717/3_architecture.md diff --git a/stp/eng/tpd/4_detailed_design.md b/.archive/tpd-backup-20250717/4_detailed_design.md similarity index 100% rename from stp/eng/tpd/4_detailed_design.md rename to .archive/tpd-backup-20250717/4_detailed_design.md diff --git a/stp/eng/tpd/5_implementation_strategy.md b/.archive/tpd-backup-20250717/5_implementation_strategy.md similarity index 100% rename from stp/eng/tpd/5_implementation_strategy.md rename to .archive/tpd-backup-20250717/5_implementation_strategy.md diff --git a/stp/eng/tpd/6_deployment_and_operations.md b/.archive/tpd-backup-20250717/6_deployment_and_operations.md similarity index 97% rename from stp/eng/tpd/6_deployment_and_operations.md rename to .archive/tpd-backup-20250717/6_deployment_and_operations.md index 5c6c836..b8ed854 100644 --- a/stp/eng/tpd/6_deployment_and_operations.md +++ b/.archive/tpd-backup-20250717/6_deployment_and_operations.md @@ -16,7 +16,7 @@ Global installation makes STP available system-wide: ```bash # Clone the STP repository -git clone https://github.com/username/stp.git ~/stp +git clone https://github.com/matthewsinclair/stp.git ~/stp # Add STP bin directory to PATH in shell profile echo 'export STP_HOME=~/stp' >> ~/.bashrc @@ -32,7 +32,7 @@ STP can also be installed on a per-project basis: ```bash # From your project directory -git clone https://github.com/username/stp.git .stp 
+git clone https://github.com/matthewsinclair/stp.git .stp # Create a local alias for the project alias stp='./.stp/bin/stp' diff --git a/stp/eng/tpd/7_technical_challenges_and_mitigations.md b/.archive/tpd-backup-20250717/7_technical_challenges_and_mitigations.md similarity index 100% rename from stp/eng/tpd/7_technical_challenges_and_mitigations.md rename to .archive/tpd-backup-20250717/7_technical_challenges_and_mitigations.md diff --git a/stp/eng/tpd/8_appendices.md b/.archive/tpd-backup-20250717/8_appendices.md similarity index 100% rename from stp/eng/tpd/8_appendices.md rename to .archive/tpd-backup-20250717/8_appendices.md diff --git a/stp/eng/tpd/technical_product_design.md b/.archive/tpd-backup-20250717/technical_product_design.md similarity index 100% rename from stp/eng/tpd/technical_product_design.md rename to .archive/tpd-backup-20250717/technical_product_design.md diff --git a/.claude/agents/elixir.md b/.claude/agents/elixir.md new file mode 100644 index 0000000..5420f2b --- /dev/null +++ b/.claude/agents/elixir.md @@ -0,0 +1,110 @@ +--- +name: elixir +description: Elixir code doctor specializing in functional programming, Usage Rules, and framework best practices +tools: Bash, Read, Write, Edit, Grep, WebFetch +--- + +You are an Elixir code doctor specializing in pure functional programming, idiomatic Elixir patterns, and modern framework best practices including Ash and Phoenix. + +## Core Elixir Programming Rules + +Always write Elixir code according to these principles: + +1. **Use `with` expressions** for clean error handling, returning `{:ok, result}` or `{:error, reason_type, reason}` consistently +2. **Break complex functions** into smaller ones and use pipe operators (`|>`) for data transformations +3. **Favour pattern matching** with multiple function heads over conditionals, using guards for type-based decisions +4. **Implement context-passing functions** with `with_x` naming convention for pipeline-friendly operations +5. 
**Include `@spec` annotations** for all public functions and define custom type aliases for common structures +6. **Write all code with two spaces** for indentation +7. **Apply functional composition** principles by designing small, focused functions that can be combined +8. **Structure error handling** using the Railway-Oriented Programming approach +9. **Use pattern matching for destructuring** data rather than accessing via traditional methods +10. **Design functions to be pipeline-friendly** with consistent argument positioning +11. **Use functional composition** with the pipe operator (|>) +12. **Use Enum functions directly** rather than manually building accumulators +13. **Leverage pattern matching** instead of conditionals where possible +14. **Avoid imperative-style if/then/else** constructs in favor of functional approaches +15. **Prefer case/with expressions** for clear control flow +16. **Use pure functional implementations** whenever possible +17. **Avoid unnecessary reversing lists** +18. **Write concise, expressive code** that embraces functional programming principles +19. 
**DO NOT WRITE BACKWARDS COMPATIBLE CODE** - Write new clean pure-functional idiomatic Elixir and fix forward + +## Framework-Specific Patterns + +### Ash Framework +- **Declarative Resource Design**: Define resources using DSL for clarity +- **Action-Oriented Architecture**: Make actions (CRUD + custom) first-class citizens +- **Explicit Authorization**: Treat auth as a primary concern with policy-based access +- **Data Layer Abstraction**: Design for multiple data sources from the start +- **Understanding-Oriented Code**: Optimize for developer comprehension + +### Phoenix Framework +- **Context Pattern**: Group related functionality in bounded contexts +- **Component-Based Design**: Build reusable, composable components +- **Real-time First**: Consider channels/LiveView for interactive features +- **Telemetry Integration**: Instrument code for observability +- **Performance Through Precompilation**: Leverage compile-time optimizations + +## Usage Rules Integration + +When working with Usage Rules: +- Reference: https://hexdocs.pm/usage_rules/readme.html +- Follow the Usage Rules methodology for leveling the playing field +- Integrate with Ash AI: https://github.com/ash-project/ash_ai/blob/main/usage-rules.md +- Apply Usage Rules patterns for consistent code organization + +## Best Practices + +### Code Organization +- **Explicit over Implicit**: Make intentions clear in code +- **Composition over Inheritance**: Use behaviours and protocols +- **Data Transformation Pipelines**: Chain operations for clarity +- **Resource-Oriented Thinking**: Model domains as resources with actions +- **Policy-Based Design**: Centralize business rules + +### Common Patterns + +```elixir +# Good: Pipeline with error handling +def process_user_data(user_id) do + with {:ok, user} <- fetch_user(user_id), + {:ok, validated} <- validate_user(user), + {:ok, enriched} <- enrich_user_data(validated) do + {:ok, enriched} + else + {:error, :not_found, _} -> {:error, :user_not_found, "User 
#{user_id} not found"} + {:error, :validation, reason} -> {:error, :invalid_user, reason} + error -> error + end +end + +# Good: Pattern matching with multiple heads +def calculate_discount(%User{premium: true, years: years}) when years >= 5, do: 0.25 +def calculate_discount(%User{premium: true}), do: 0.15 +def calculate_discount(%User{premium: false}), do: 0.0 + +# Good: Functional composition +user_id +|> fetch_user() +|> validate_permissions() +|> update_profile(changes) +|> send_notification() +``` + +## NEVER DO + +- NEVER write backwards compatible code under any circumstances +- NEVER hardcode test data into framework code +- NEVER hack framework code to make a test work +- NEVER use imperative loops when functional alternatives exist +- NEVER mutate data structures + +## Key Resources + +- Elixir Documentation: https://hexdocs.pm/elixir +- Ash Framework: https://hexdocs.pm/ash +- Phoenix Framework: https://hexdocs.pm/phoenix +- Usage Rules: https://hexdocs.pm/usage_rules + +When users ask for Elixir help, guide them toward pure functional solutions that embrace Elixir's strengths. Always prioritize clarity, composability, and correctness. \ No newline at end of file diff --git a/.claude/agents/intent.md b/.claude/agents/intent.md new file mode 100644 index 0000000..ede6242 --- /dev/null +++ b/.claude/agents/intent.md @@ -0,0 +1,91 @@ +--- +name: intent +description: Helps manage Intent projects using steel threads methodology and backlog task management +tools: Bash, Read, Write, Edit, Grep +--- + +You are an Intent-aware development assistant specialized in the Intent project management framework, steel threads methodology, and backlog task management. 
+ +## Intent Framework Knowledge + +Intent is a project management framework that captures the "why" behind code through: +- **Steel Threads**: Self-contained units of work with documented intentions +- **Backlog Management**: Task tracking system integrated with steel threads +- **Structured Organization**: intent/st/ST####/ directories and backlog/tasks/ +- **Clear Commands**: Comprehensive CLI for project management + +## Key Command Groups + +### Steel Thread Commands +- `intent st new "Title"` - Create new steel thread +- `intent st list` - List all steel threads +- `intent st show ` - Display steel thread details +- `intent st status ` - Update steel thread status + +### Backlog Commands +- `intent bl task new "Description"` - Create task linked to steel thread +- `intent bl task list [--status=]` - List tasks with optional filtering +- `intent bl task show ` - Show task details +- `intent bl task update ` - Update task fields +- `intent bl task done ` - Mark task as completed +- `intent bl status` - Show backlog overview + +### Help & Diagnostics +- `intent help` - Show general help +- `intent help ` - Show help for specific command +- `intent doctor` - Verify Intent configuration and health +- `intent info` - Display Intent version and configuration + +## When Working on Intent Projects + +1. **Check Project Structure**: + - Look for intent/ directory and .intent/config.json + - Verify backlog/ directory exists if using task management + +2. **Steel Thread Workflow**: + - Create steel thread: `intent st new "Feature Name"` + - Document intention in info.md + - Break down work into tasks using backlog + +3. **Task Management Workflow**: + - Create tasks linked to steel threads + - Track progress with task status updates + - Use `intent bl status` for project overview + +4. **Getting Help**: + - Use `intent help` for command reference + - Run `intent doctor` if things seem broken + - Check documentation in intent/docs/ + +## Best Practices + +1. 
**Always Link Tasks to Steel Threads**: Every task should connect to a parent steel thread +2. **Document Intentions First**: Create steel thread and document "why" before coding +3. **Update Status Regularly**: Keep steel thread and task statuses current +4. **Use Descriptive Names**: Both steel threads and tasks should be self-explanatory + +## Common Workflows + +### Starting New Feature +```bash +intent st new "Add user authentication" +intent bl task new ST0042 "Research auth libraries" +intent bl task new ST0042 "Design auth architecture" +intent bl task new ST0042 "Implement JWT tokens" +``` + +### Checking Project Status +```bash +intent st list --status="In Progress" +intent bl status +intent bl task list --status=pending +``` + +### Getting Help +```bash +intent help # General help +intent help st new # Specific command help +intent doctor # Check configuration +``` + +When users ask about their Intent project, help them navigate steel threads, manage their backlog effectively, and maintain the Intent methodology throughout their development process. Always encourage proper documentation of intentions and systematic task tracking. 
\ No newline at end of file diff --git a/.claude/settings.local.json b/.claude/settings.local.json index e23bfce..2b73f71 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -28,7 +28,17 @@ "Bash(backlog:*)", "Bash(/Users/matts/Devel/prj/STP/bin/stp-migrate:*)", "Bash(/Users/matts/Devel/prj/STP/bin/stp-task list:*)", - "Bash(/Users/matts/Devel/prj/STP/bin/stp-status show:*)" + "Bash(/Users/matts/Devel/prj/STP/bin/stp-status show:*)", + "Bash(mv:*)", + "Bash(bash bin/intent_upgrade:*)", + "Bash(/Users/matts/Devel/prj/Intent/bin/intent bl --help)", + "Bash(bats:*)", + "Bash(./tests/run_tests.sh:*)", + "Bash(git checkout:*)", + "Bash(git ls-tree:*)", + "Bash(./bin/intent:*)", + "Bash(git tag:*)", + "Bash(git push:*)" ], "deny": [] } diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 7c3cc99..a547342 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -26,9 +26,9 @@ jobs: echo "Referenced thread: $THREAD_ID" # Check if the thread exists (either as file or directory) - if [ -f "stp/prj/st/${THREAD_ID}.md" ] || [ -d "stp/prj/st/${THREAD_ID}" ]; then + if [ -f "intent/st/${THREAD_ID}.md" ] || [ -d "intent/st/${THREAD_ID}" ]; then echo "✅ Steel thread exists" - elif [ -d "stp/prj/st/COMPLETED/${THREAD_ID}" ] || [ -d "stp/prj/st/NOT-STARTED/${THREAD_ID}" ] || [ -d "stp/prj/st/CANCELLED/${THREAD_ID}" ]; then + elif [ -d "intent/st/COMPLETED/${THREAD_ID}" ] || [ -d "intent/st/NOT-STARTED/${THREAD_ID}" ] || [ -d "intent/st/CANCELLED/${THREAD_ID}" ]; then echo "✅ Steel thread exists (in status subdirectory)" else echo "❌ Error: Steel thread ${THREAD_ID} not found" @@ -54,7 +54,7 @@ jobs: CHANGED_FILES="${{ github.event.pull_request.changed_files }}" # Check if any scripts were modified - if git diff --name-only origin/main..HEAD | grep -q "stp/bin/"; then + if git diff --name-only origin/main..HEAD | grep -q "bin/"; then echo "Scripts were modified - checking for documentation updates" 
# Check if any documentation was also updated @@ -78,7 +78,7 @@ jobs: echo "Checking test coverage for changes..." # Check if any source files were modified - if git diff --name-only origin/main..HEAD | grep -q "stp/bin/"; then + if git diff --name-only origin/main..HEAD | grep -q "bin/"; then echo "Source files were modified - checking for test updates" # Check if any tests were also updated diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 0dbe25c..5a77e95 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,4 +1,4 @@ -name: STP Tests +name: Intent Tests on: push: @@ -23,56 +23,60 @@ jobs: - name: Install bats-core run: | - # Install bats from package manager - sudo apt-get update - sudo apt-get install -y bats + # Install bats-core v1.12.0 from GitHub + wget https://github.com/bats-core/bats-core/archive/v1.12.0.tar.gz + tar -xzf v1.12.0.tar.gz + cd bats-core-1.12.0 + sudo ./install.sh /usr/local # Make sure bats is in PATH - echo "PATH=$PATH:/usr/bin" >> $GITHUB_ENV + echo "PATH=/usr/local/bin:$PATH" >> $GITHUB_ENV which bats + bats --version - - name: Install Backlog.md + - name: Install dependencies run: | + # Install jq + sudo apt-get update + sudo apt-get install -y jq + # Install Backlog.md npm install -g backlog.md backlog --version || echo "Backlog installation status: $?" - name: Make scripts executable run: | - chmod +x stp/bin/* - chmod +x stp/tests/*.sh 2>/dev/null || true + chmod +x bin/* + chmod +x tests/*.sh 2>/dev/null || true - name: Install bats libraries manually run: | - mkdir -p stp/tests/lib - [ ! -d "stp/tests/lib/bats-support" ] && git clone https://github.com/bats-core/bats-support.git stp/tests/lib/bats-support - [ ! -d "stp/tests/lib/bats-assert" ] && git clone https://github.com/bats-core/bats-assert.git stp/tests/lib/bats-assert - [ ! -d "stp/tests/lib/bats-file" ] && git clone https://github.com/bats-core/bats-file.git stp/tests/lib/bats-file + mkdir -p tests/lib + [ ! 
-d "tests/lib/bats-support" ] && git clone https://github.com/bats-core/bats-support.git tests/lib/bats-support + [ ! -d "tests/lib/bats-assert" ] && git clone https://github.com/bats-core/bats-assert.git tests/lib/bats-assert + [ ! -d "tests/lib/bats-file" ] && git clone https://github.com/bats-core/bats-file.git tests/lib/bats-file - name: Run unit tests + env: + BATS_LIB_PATH: ${{ github.workspace }}/tests/lib run: | - cd stp/tests + cd tests if [ -f "./run_tests.sh" ]; then ./run_tests.sh else - # Run tests directly if run_tests.sh doesn't exist - for test_dir in */; do - if [ -d "$test_dir" ] && [ "$test_dir" != "lib/" ] && [ "$test_dir" != "integration/" ]; then - echo "Running tests in $test_dir" - for test_file in "$test_dir"/*test*.bats; do - if [ -f "$test_file" ]; then - echo " Running: $test_file" - bats "$test_file" || true - fi - done - fi + # Run tests directly if run_tests.sh doesn't exist (excluding lib directory) + find . -name "*.bats" -type f -not -path "./lib/*" | sort | while read -r test_file; do + echo "Running: $test_file" + bats "$test_file" || true done fi - name: Run integration tests + env: + BATS_LIB_PATH: ${{ github.workspace }}/tests/lib run: | - cd stp/tests + cd tests echo "Running integration tests..." - if [ -f "integration/stp_backlog_integration_test.bats" ]; then - bats integration/stp_backlog_integration_test.bats || echo "Integration tests completed with status: $?" + if [ -f "integration/end_to_end.bats" ]; then + bats integration/end_to_end.bats || echo "Integration tests completed with status: $?" else echo "No integration tests found" fi @@ -98,15 +102,19 @@ jobs: # Make sure bats is in PATH echo "PATH=$PATH:/usr/local/bin:/opt/homebrew/bin" >> $GITHUB_ENV which bats + bats --version - - name: Install Backlog.md + - name: Install dependencies run: | + # Install jq + brew install jq + # Install Backlog.md npm install -g backlog.md backlog --version || echo "Backlog installation status: $?" 
- name: Set up test environment run: | - cd stp/tests + cd tests if [ -f "setup_test_env.sh" ]; then chmod +x setup_test_env.sh ./setup_test_env.sh @@ -120,27 +128,22 @@ jobs: - name: Make scripts executable run: | - chmod +x stp/bin/* - chmod +x stp/tests/*.sh 2>/dev/null || true + chmod +x bin/* + chmod +x tests/*.sh 2>/dev/null || true - name: Run tests + env: + BATS_LIB_PATH: ${{ github.workspace }}/tests/lib run: | - cd stp/tests + cd tests if [ -f "./run_tests.sh" ]; then chmod +x run_tests.sh ./run_tests.sh else - # Run tests directly - for test_dir in */; do - if [ -d "$test_dir" ] && [ "$test_dir" != "lib/" ]; then - echo "Running tests in $test_dir" - for test_file in "$test_dir"/*test*.bats; do - if [ -f "$test_file" ]; then - echo " Running: $test_file" - bats "$test_file" || true - fi - done - fi + # Run tests directly (excluding lib directory) + find . -name "*.bats" -type f -not -path "./lib/*" | sort | while read -r test_file; do + echo "Running: $test_file" + bats "$test_file" || true done fi @@ -153,9 +156,9 @@ jobs: - name: Run ShellCheck run: | - echo "Running ShellCheck on STP scripts..." + echo "Running ShellCheck on Intent scripts..." 
# Run shellcheck on all shell scripts, but don't fail the build - find stp/bin -type f -name "stp*" | while read -r script; do + find bin -type f -name "intent*" | while read -r script; do if file "$script" | grep -q "shell script"; then echo "Checking: $script" shellcheck "$script" || echo " ShellCheck found issues (non-blocking)" diff --git a/.intent/config.json b/.intent/config.json new file mode 100644 index 0000000..6d704a2 --- /dev/null +++ b/.intent/config.json @@ -0,0 +1,19 @@ +{ + "version": "2.3.2", + "intent_version": "2.3.2", + "project_name": "Intent", + "author": "matts", + "created": "2025-07-16", + "st_prefix": "ST", + "backlog_dir": "backlog", + "intent_dir": "intent", + "backlog_list_status": "todo", + "plugins": { + "claude": { + "subagents_path": "intent/plugins/claude/subagents" + }, + "agents": { + "config_path": "intent/plugins/agents" + } + } +} diff --git a/.stp_backup/1.2.1/ST0001.md b/.stp_backup/1.2.1/ST0001.md deleted file mode 100644 index af02b5c..0000000 --- a/.stp_backup/1.2.1/ST0001.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0001: Directory Structure - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: LLM Assisted - -## Objective - -Create the initial directory structure and placeholder files for the Steel Thread Process (STP) system. - -## Context - -The STP system requires a specific directory structure to organize templates, scripts, and documentation. This steel thread will establish the foundation upon which the rest of the system will be built. - -## Approach - -1. Create the primary directories based on the specification -2. Create placeholder files to maintain directory structure -3. Set up the necessary template directory structure -4. 
Document the directory layout for future reference - -## Tasks - -- [x] Create root level directory structure -- [x] Create subdirectories for each component -- [x] Create placeholder files for templates -- [x] Document directory structure in the technical product design -- [ ] Create symbolic links or references between related directories -- [ ] Validate directory structure against requirements - -## Implementation Notes - -### Directory Structure Created - -The following directory structure has been established: - -``` -STP/ -├── stp/ # Main STP directory -│ ├── _templ/ # Templates directory -│ │ ├── prj/ # Project document templates -│ │ │ ├── _wip.md -│ │ │ ├── _journal.md -│ │ │ └── st/ -│ │ │ ├── _steel_threads.md -│ │ │ └── _ST####.md -│ │ ├── eng/ # Engineering document templates -│ │ │ └── tpd/ -│ │ │ ├── _technical_product_design.md -│ │ │ ├── _1_introduction.md -│ │ │ ├── ... -│ │ ├── usr/ # User document templates -│ │ │ ├── _user_guide.md -│ │ │ ├── _reference_guide.md -│ │ │ └── _deployment_guide.md -│ │ └── llm/ # LLM document templates -│ │ └── _llm_preamble.md -│ ├── bin/ # STP scripts -│ │ ├── .help # Help for each STP command -│ │ ├── stp # Main STP command -│ │ ├── stp_init # Init command implementation -│ │ ├── stp_st # Steel thread command implementation -│ │ ├── stp_help # Help command implementation -│ │ └── ... # Other command implementations -│ ├── prj/ # Project documentation -│ │ ├── st/ # Steel threads -│ │ │ ├── steel_threads.md # Steel thread index -│ │ │ ├── ST0001.md # Individual steel thread -│ │ │ └── ... -│ │ ├── wip.md # Work in progress -│ │ └── journal.md # Project journal -│ ├── eng/ # Engineering docs -│ │ └── tpd/ # Technical Product Design -│ │ ├── technical_product_design.md # Main TPD document -│ │ ├── 1_introduction.md # TPD sections -│ │ └── ... 
-│ ├── usr/ # User documentation -│ │ ├── user_guide.md -│ │ ├── reference_guide.md -│ │ └── deployment_guide.md -│ └── llm/ # LLM-specific content -│ ├── llm_preamble.md -│ └── *.prompt.md # Canned prompts -├── bin/ # Executable scripts (outside the stp structure) -``` - -### Naming Conventions - -- All templates begin with an underscore (_) -- All steel thread documents follow the pattern ST####.md (with 4-digit IDs) -- All scripts follow the pattern stp_command - -### Template Organization - -Templates are organized to mirror the actual directory structure where the instantiated files will reside. This makes it easier to understand the relationship between templates and their final locations. - -## Results - -The directory structure for the STP system was successfully implemented with all planned components: - -- Created the main directory hierarchy for documentation, templates, and scripts -- Established clear separation between template files and active project files -- Created placeholder files to maintain directory structure -- Implemented a logical organization that supports the STP workflow -- Added appropriate naming conventions for consistency - -The directory structure provides a solid foundation for the STP system, enabling all other steel threads to build upon this organizational framework. The structure is intuitive for users and supports the various document types and workflows necessary for the system. 
- -## Related Steel Threads - -- ST0002: Core Script Framework -- ST0003: Template System diff --git a/.stp_backup/1.2.1/ST0002.md b/.stp_backup/1.2.1/ST0002.md deleted file mode 100644 index ed19915..0000000 --- a/.stp_backup/1.2.1/ST0002.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0002: Core Script Framework - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: LLM Assisted - -## Objective - -Implement the main `stp` script and command dispatching system that forms the core of the STP command-line interface. - -## Context - -The STP system needs a command-line interface to manage steel threads, initialize projects, and provide help. This steel thread focuses on creating the core script framework that will dispatch commands to their specific implementations. - -## Approach - -1. Create a main `stp` script that handles command dispatching -2. Implement environment variable handling and script location detection -3. Create a modular framework for command implementations -4. Implement error handling and usage information -5. Create a help system for displaying command documentation - -## Tasks - -- [x] Implement main `stp` script with command dispatching -- [x] Implement environment variable handling -- [x] Implement error handling framework -- [x] Create the help system script -- [x] Create help documentation for core commands -- [x] Implement the init script for project initialization -- [x] Implement the steel thread management script -- [x] Test script functionality in various environments -- [x] Add verbose output mode for debugging - -## Implementation Notes - -### Core Script Design - -The core script framework follows a modular design where: - -1. 
The main `stp` script: - - Validates input parameters - - Determines the STP_HOME directory - - Dispatches to the appropriate command implementation - - Handles basic error conditions - -2. Command implementations: - - Each command is implemented in a separate script named `stp_` - - Commands receive parameters directly from the main script - - Commands handle their own parameter validation - - Commands provide specific error messages - -3. Help system: - - Implemented in `stp_help` - - Reads help documentation from `.help` directory - - Provides both general and command-specific help - -### Script Environment - -The scripts use environment variables to maintain configuration: - -- `STP_HOME`: Location of the STP installation -- `STP_PROJECT`: Current project name -- `STP_AUTHOR`: Default author name -- `STP_EDITOR`: Preferred text editor - -The main script can determine `STP_HOME` automatically if not set. - -### Error Handling - -Error handling follows these principles: - -- Exit with non-zero status on error -- Provide clear error messages to stderr -- Check prerequisites before operations -- Validate input parameters -- Handle script permissions issues - -### Help Documentation - -Help documentation follows a standard format with sections: - -- `@short`: Brief one-line description -- `@desc`: Detailed description -- `@usage`: Usage information, parameters, and examples - -## Results - -The core script framework was successfully implemented with all planned components: - -- Created a modular command dispatching system -- Implemented environment variable handling and configuration -- Created a robust error handling framework -- Built a help system for documentation access -- Implemented core commands (init, st, help) -- Tested functionality across different environments -- Added verbose mode for debugging and troubleshooting - -The implementation provides a solid foundation for the STP command-line interface, with a modular design that makes it easy to add new 
commands and extend functionality. The error handling is robust, providing clear messages to users when issues occur. The command dispatching system efficiently routes commands to their specific implementation scripts, maintaining a clean separation of concerns. - -## Related Steel Threads - -- ST0001: Directory Structure -- ST0004: Steel Thread Commands -- ST0005: Initialization Command -- ST0006: Help System diff --git a/.stp_backup/1.2.1/ST0003.md b/.stp_backup/1.2.1/ST0003.md deleted file mode 100644 index 815de66..0000000 --- a/.stp_backup/1.2.1/ST0003.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0003: Template System - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Create a template system for STP that provides standardized starting points for all document types and ensures consistency across the project. - -## Context - -Templates are essential for maintaining consistency in documentation and providing users with immediate guidance on what information to include. This steel thread implements the template system for STP, creating templates for project documents, engineering documents, user documents, and steel threads themselves. - -## Approach - -1. Identify all document types that require templates -2. Create template files with placeholder content and instructions -3. Design a template directory structure that mirrors the final document structure -4. Implement consistent formatting and styles across all templates -5. 
Add LLM-specific guidance within templates - -## Tasks - -- [x] Design template directory structure -- [x] Create project document templates (journal, WIP, steel threads) -- [x] Create engineering document templates (technical product design) -- [x] Create user document templates (user guide, reference guide, deployment guide) -- [x] Add LLM-specific guidance sections to all templates -- [x] Test template instantiation process -- [x] Document template system in the technical product design - -## Implementation Notes - -### Template Organization - -Templates are organized in a directory structure that mirrors their final location: - -``` -_templ/ -├── prj/ # Project document templates -│ ├── _wip.md -│ ├── _journal.md -│ └── st/ -│ ├── _steel_threads.md -│ └── _ST####.md -├── eng/ # Engineering document templates -│ └── tpd/ -│ ├── _technical_product_design.md -│ ├── _1_introduction.md -│ └── ... -├── usr/ # User document templates -│ ├── _user_guide.md -│ ├── _reference_guide.md -│ └── _deployment_guide.md -└── llm/ # LLM document templates - └── _llm_preamble.md -``` - -### Template Design Principles - -1. All templates begin with an underscore (_) to distinguish them from actual documents -2. Templates include placeholders marked with [brackets] to indicate information that needs to be filled in -3. Each template includes guidance text that explains its purpose and how to complete it -4. Templates for modular documents (like technical product design) are split into logical sections -5. All templates include a version block section at the top for tracking changes - -### LLM Integration - -Each template includes a "Context for LLM" section that provides: -- The purpose of the document type -- Instructions for updating and maintaining the document -- Guidance on what information to include in each section -- Related documents that may be relevant - -## Results - -The template system was successfully implemented with templates for all document types. 
The system provides: - -- Consistent document formats across the project -- Clear guidance to users on what information to include -- LLM-specific sections to aid in document generation and maintenance -- A logical organization that mirrors the final document structure - -The template system serves as a strong foundation for the STP project, ensuring documentation consistency and completeness. - -## Related Steel Threads - -- ST0001: Directory Structure -- ST0002: Core Script Framework -- ST0005: Initialization Command - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. diff --git a/.stp_backup/1.2.1/ST0004.md b/.stp_backup/1.2.1/ST0004.md deleted file mode 100644 index f7e6964..0000000 --- a/.stp_backup/1.2.1/ST0004.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0004: Steel Thread Commands - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Implement the steel thread command subsystem (`stp st`) to enable users to create, manage, and track steel threads throughout a project lifecycle. 
- -## Context - -Steel threads are a core concept in the STP system, representing discrete units of work that can be tracked and documented. This steel thread implements the command-line interface for managing steel threads, including creating new threads, listing existing threads, marking threads as complete, and viewing thread details. - -## Approach - -1. Design a command-line interface for managing steel threads -2. Implement the core `stp_st` script with subcommands -3. Create templates for steel thread documents -4. Implement a steel thread index for tracking all threads -5. Ensure user-friendly error handling and documentation - -## Tasks - -- [x] Design the command-line interface for `stp st` -- [x] Implement the `stp st new` command for creating new steel threads -- [x] Implement the `stp st list` command for listing all steel threads -- [x] Implement the `stp st show` command for viewing a specific thread -- [x] Implement the `stp st done` command for marking threads as complete -- [x] Implement the `stp st edit` command for opening threads in an editor -- [x] Create a steel thread index file and update mechanism -- [x] Add error handling and user-friendly messages -- [x] Write documentation for the steel thread commands - -## Implementation Notes - -### Command Structure - -The steel thread command subsystem includes the following commands: - -- `stp st new ` - Create a new steel thread with the given title -- `stp st list [--status <status>]` - List all steel threads, optionally filtered by status -- `stp st show <id>` - Display the contents of a specific steel thread -- `stp st done <id>` - Mark a steel thread as complete -- `stp st edit <id>` - Open a steel thread in the default editor - -### Steel Thread ID Format - -Steel threads follow a consistent ID format: -- IDs are in the format ST#### (e.g., ST0001) -- Numbers are padded to 4 digits with leading zeros -- IDs are automatically assigned in sequence - -To improve usability, commands accept 
abbreviated IDs: -- Just the number (e.g., `1`) -- The number with or without leading zeros (e.g., `0001`) -- The full ID (e.g., `ST0001`) - -### Steel Thread Index - -The system maintains a steel thread index file (`steel_threads.md`) that: -- Lists all steel threads with their status, creation date, and completion date -- Is automatically updated when creating or marking threads as complete -- Provides a Markdown table for easy viewing -- Includes links to the individual steel thread files - -### Cross-Platform Support - -The `edit` command is designed to work across platforms: -- Uses `open` on macOS -- Uses `xdg-open` on Linux -- Uses `start` on Windows -- Falls back to environment variables or vi as a last resort - -## Results - -The steel thread command subsystem was successfully implemented with all planned functionality. The system provides: - -- A user-friendly interface for managing steel threads -- Automatic generation of steel thread documents from templates -- Consistent tracking of steel thread status and metadata -- Cross-platform support for editing and viewing threads -- A formatted display of steel thread listings with proper headers - -The implementation helps enforce consistency in steel thread documentation while making it easy for users to create and manage threads throughout a project lifecycle. - -## Related Steel Threads - -- ST0001: Directory Structure -- ST0002: Core Script Framework -- ST0003: Template System -- ST0006: Help System - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. 
Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. diff --git a/.stp_backup/1.2.1/ST0005.md b/.stp_backup/1.2.1/ST0005.md deleted file mode 100644 index d62f081..0000000 --- a/.stp_backup/1.2.1/ST0005.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0005: Initialization Command - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Implement the initialization command (`stp init`) that sets up a new STP project structure with all necessary directories and template files. - -## Context - -The initialization command is a critical component of the STP system, allowing users to quickly bootstrap a new project with the correct directory structure and template files. This steel thread implements the `stp init` command and its supporting functionality. - -## Approach - -1. Design the initialization process workflow -2. Implement the `stp_init` script to create the directory structure -3. Add functionality to copy template files to their appropriate locations -4. Create configuration file management -5. 
Add customization options for project-specific settings - -## Tasks - -- [x] Design the directory structure to be created during initialization -- [x] Implement the basic `stp init` command -- [x] Add logic to copy templates from the template directory -- [x] Create a configuration file for project-specific settings -- [x] Add support for customizing project metadata during initialization -- [x] Add verification to prevent re-initialization of existing projects -- [x] Implement error handling and user feedback -- [x] Document the initialization process -- [x] Test initialization on different operating systems - -## Implementation Notes - -### Directory Structure Creation - -The initialization command creates the following directory structure: - -``` -project/ -├── prj/ # Project documentation -│ ├── st/ # Steel threads -│ │ └── steel_threads.md -│ ├── wip.md # Work in progress -│ └── journal.md # Project journal -├── eng/ # Engineering docs -│ └── tpd/ # Technical Product Design -│ ├── technical_product_design.md -│ ├── 1_introduction.md -│ └── ... -├── usr/ # User documentation -│ ├── user_guide.md -│ ├── reference_guide.md -│ └── deployment_guide.md -└── llm/ # LLM-specific content - └── llm_preamble.md -``` - -### Template Instantiation - -During initialization, the system: -1. Creates all required directories -2. Copies template files from the template directory to their respective locations -3. Removes the leading underscore from template filenames -4. Populates project-specific metadata in templates (project name, date, author, etc.) - -### Configuration Management - -The initialization process creates a `.stp-config` file that contains: -- Project name -- Project creation date -- Author information -- Project-specific settings -- Paths to important directories and files - -This configuration file is used by other STP commands to locate resources and customize behavior. 
- -### User Interaction - -The initialization command: -- Prompts for project name if not provided as an argument -- Automatically detects the current user as the author (can be overridden) -- Provides clear feedback during initialization -- Warns if attempting to initialize an existing project -- Displays a success message with next steps after completion - -## Results - -The initialization command was successfully implemented with all planned functionality. The system: - -- Creates a complete project structure with all necessary directories -- Instantiates templates with project-specific information -- Creates a configuration file for use by other STP commands -- Provides a smooth user experience with appropriate feedback -- Ensures consistency in project structure across different projects - -The implementation significantly reduces the time required to set up a new project and ensures that all projects follow a consistent structure. - -## Related Steel Threads - -- ST0001: Directory Structure -- ST0002: Core Script Framework -- ST0003: Template System -- ST0006: Help System - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/.stp_backup/1.2.1/ST0006.md b/.stp_backup/1.2.1/ST0006.md deleted file mode 100644 index 3956260..0000000 --- a/.stp_backup/1.2.1/ST0006.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0006: Help System - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Implement a comprehensive help system for STP that provides users with command-specific guidance, usage examples, and general information about the system. - -## Context - -A good help system is essential for improving user experience and adoption. This steel thread implements the help system for STP, including the `stp help` command, command-specific help, and a methodology for maintaining help documentation alongside the code. - -## Approach - -1. Design the help system architecture -2. Create a help file format and structure -3. Implement the `stp help` command and its subcommands -4. Create help files for all existing commands -5. 
Ensure the help system is extensible for future commands - -## Tasks - -- [x] Design the help file format and structure -- [x] Implement the `stp help` command -- [x] Implement command-specific help (e.g., `stp help init`) -- [x] Create help files for all existing commands -- [x] Add functionality to list all available commands -- [x] Implement dynamic discovery of commands for help listings -- [x] Add examples and usage scenarios to help documentation -- [x] Document the help system and how to extend it -- [x] Test help system with various commands and scenarios - -## Implementation Notes - -### Help File Structure - -Help files are stored in the `.help` directory and follow a consistent format: - -- Filename pattern: `command.help.md` -- Structure: - ``` - @short: Brief one-line description - @desc: - Detailed multi-line description - - @usage: - command [options] <arguments> - - @examples: - command example1 - command example2 - ``` - -### Help Command Implementation - -The `stp help` command: -1. Without arguments, displays a list of all available commands with short descriptions -2. With a command argument (e.g., `stp help init`), displays detailed help for that command -3. Dynamically discovers available commands by scanning for `stp_*` scripts - -### Dynamic Command Discovery - -The system: -- Scans the bin directory for `stp_*` scripts to identify available commands -- Extracts short descriptions from corresponding help files -- Formats the output in a consistent and readable way -- Handles the case of missing help files gracefully - -### Multi-line Description Handling - -The help system properly formats multi-line descriptions, maintaining: -- Proper indentation -- Paragraph structure -- Code blocks and examples -- Lists and other formatting - -## Results - -The help system was successfully implemented with all planned functionality. 
The system: - -- Provides clear, concise help for all STP commands -- Supports both general help and command-specific detailed help -- Dynamically discovers commands, making it extensible as new commands are added -- Maintains a consistent format across all help documentation -- Improves user experience by providing usage examples and clear instructions - -The implementation significantly improves usability by making it easy for users to learn how to use the system and discover available functionality. - -## Related Steel Threads - -- ST0002: Core Script Framework -- ST0004: Steel Thread Commands -- ST0005: Initialization Command -- ST0007: User Documentation - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. diff --git a/.stp_backup/1.2.1/ST0007.md b/.stp_backup/1.2.1/ST0007.md deleted file mode 100644 index 910e9b2..0000000 --- a/.stp_backup/1.2.1/ST0007.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0007: User Documentation - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Create comprehensive user documentation for the STP system, including a user guide, reference guide, and deployment guide. 
- -## Context - -Good documentation is critical for tool adoption and effective use. This steel thread focuses on creating user-focused documentation that explains how to use STP, its features, and how to customize it for different project needs. - -## Approach - -1. Identify key documentation needs for different user types -2. Create templates for all documentation types -3. Write comprehensive documentation for all STP features -4. Include examples, use cases, and best practices -5. Organize documentation in a logical, accessible manner - -## Tasks - -- [x] Create a documentation template structure -- [x] Write the user guide covering basic usage -- [x] Write the reference guide documenting all commands and options -- [x] Write the deployment guide for system administrators -- [x] Add usage examples for common scenarios -- [x] Include troubleshooting information -- [x] Document customization options -- [x] Test documentation clarity with sample users -- [x] Implement feedback from documentation testing - -## Implementation Notes - -### Documentation Structure - -The user documentation is divided into three main documents: - -1. **User Guide**: Focuses on getting started and common tasks - - Introduction to STP - - Installation and setup - - Basic workflows - - Best practices - - Getting help - -2. **Reference Guide**: Provides detailed information on all commands - - Complete command reference - - Configuration options - - File formats - - Template customization - - Advanced usage - -3. **Deployment Guide**: For system administrators and team leads - - System requirements - - Installation options - - Team configuration - - Integration with other tools - - Maintenance tasks - -### Documentation Design Principles - -The documentation follows these principles: - -1. **Task-oriented**: Organized around user tasks rather than system features -2. **Progressive disclosure**: Basic information first, with links to more advanced topics -3. 
**Consistent structure**: Each section follows a consistent format -4. **Examples-rich**: Every feature includes practical examples -5. **Visual aids**: Diagrams and screenshots where helpful - -### Cross-References - -The documentation maintains consistent cross-references: - -- Between documents (e.g., from user guide to reference guide) -- To specific command help documentation -- To steel thread documents for technical details -- To external resources where appropriate - -## Results - -The user documentation was successfully created with all planned components. The documentation: - -- Provides clear guidance for new users to get started -- Offers detailed reference information for advanced users -- Includes examples and best practices for common scenarios -- Is organized logically for easy navigation -- Balances brevity with completeness -- Accommodates different learning styles and needs - -The implementation helps users adopt and effectively use the STP system, reducing the learning curve and improving productivity. - -## Related Steel Threads - -- ST0003: Template System -- ST0006: Help System -- ST0008: LLM Integration - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/.stp_backup/1.2.1/ST0008.md b/.stp_backup/1.2.1/ST0008.md deleted file mode 100644 index bb718dd..0000000 --- a/.stp_backup/1.2.1/ST0008.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0008: LLM Integration - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Integrate Large Language Model (LLM) capabilities into the STP system to assist with document generation, code creation, and project management tasks. - -## Context - -LLMs offer powerful capabilities for automating and enhancing many aspects of software development and documentation. This steel thread implements LLM integration into STP, allowing users to leverage AI assistance for various tasks while maintaining human oversight and quality control. - -## Approach - -1. Identify key integration points for LLM assistance -2. Design a prompt engineering system specific to STP -3. Create LLM-specific sections in templates -4. Implement LLM preamble files to provide consistent context -5. Develop documentation for effective LLM use within STP - -## Tasks - -- [x] Create an LLM preamble file structure -- [x] Add LLM context sections to all templates -- [x] Develop standard prompts for common tasks -- [x] Implement LLM-aware document formatting -- [x] Create guidance for effective prompt engineering -- [x] Document LLM integration features -- [x] Test LLM effectiveness with various tasks -- [x] Develop best practices for human-LLM collaboration -- [x] Implement feedback from LLM integration testing - -## Implementation Notes - -### LLM Integration Points - -The STP system integrates LLMs at several key points: - -1. **Document Generation**: Templates include LLM-specific context sections that help guide the model in generating appropriate content -2. 
**Steel Thread Management**: LLMs can assist in planning and documenting steel threads -3. **Technical Design**: LLMs can help with creating and refining technical product design documents -4. **Code Generation**: LLMs can assist with implementation tasks based on steel thread documentation - -### LLM Preamble System - -The system uses a standardized preamble approach: - -1. A base LLM preamble file (`llm_preamble.md`) provides project context -2. Document-specific context sections explain the purpose and structure of each document type -3. Task-specific prompts guide the LLM for particular activities - -### LLM-Aware Document Structure - -Documents are structured to be LLM-friendly: - -1. Clear section headings and hierarchical organization -2. Explicit placeholders and instructions -3. Context sections that are hidden from final rendered output -4. Semantic organization that helps the LLM understand document relationships - -### Prompt Engineering Guidance - -The implementation includes: - -1. Example prompts for common tasks -2. Guidance on prompt construction -3. Strategies for effective LLM collaboration -4. Troubleshooting techniques for common LLM challenges - -## Results - -The LLM integration was successfully implemented with all planned components. The integration: - -- Provides consistent context to LLMs for better generation results -- Streamlines document creation and maintenance tasks -- Offers guidance to users on effective LLM collaboration -- Maintains human oversight and quality control -- Reduces time spent on repetitive documentation tasks -- Improves document consistency and completeness - -The implementation significantly enhances the productivity of STP users by providing AI assistance while maintaining appropriate human control over the final output. 
- -## Related Steel Threads - -- ST0003: Template System -- ST0004: Steel Thread Commands -- ST0007: User Documentation -- ST0009: Process Refinement - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. diff --git a/.stp_backup/1.2.1/ST0009.md b/.stp_backup/1.2.1/ST0009.md deleted file mode 100644 index ab7c05d..0000000 --- a/.stp_backup/1.2.1/ST0009.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250306 -completed: 20250603 ---- -# ST0009: Process Refinement - -- **Status**: Completed -- **Created**: 2025-03-06 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Refine the STP process based on user feedback and practical experience, improving workflows, commands, and documentation to enhance usability and effectiveness. - -## Context - -After implementing the core STP functionality, this steel thread focuses on refining the system based on real-world usage, addressing pain points, and implementing improvements to make the system more intuitive and effective for users. - -## Approach - -1. Gather feedback from initial STP users -2. Identify common pain points and improvement opportunities -3. Prioritize refinements based on impact and implementation effort -4. Implement improvements iteratively -5. 
Test refinements with users and gather additional feedback - -## Tasks - -- [x] Create a feedback collection mechanism -- [x] Analyze user workflows and identify friction points -- [x] Improve command interfaces for better usability -- [x] Refine error messages and user feedback -- [x] Enhance steel thread management workflow -- [x] Streamline initialization process -- [x] Improve template usability -- [x] Optimize LLM integration -- [x] Update documentation to reflect process improvements -- [x] Test refinements with users - -## Implementation Notes - -### Workflow Improvements - -Several key workflow improvements were implemented: - -1. **Abbreviated ID Support**: Steel thread commands now accept abbreviated IDs (e.g., `stp st show 1` instead of `stp st show ST0001`) -2. **Edit Command**: Added `stp st edit` command to quickly open steel thread files in the default editor -3. **Improved Listing**: Enhanced the steel thread listing format with clear headers and formatting -4. **Status Filtering**: Added ability to filter steel threads by status - -### Command Interface Refinements - -The command interfaces were refined with: - -1. **Consistent Parameter Handling**: Standardized parameter parsing across all commands -2. **Better Error Messages**: More descriptive error messages with suggested solutions -3. **Intelligent Defaults**: Added smart defaults to reduce required input -4. **Cross-platform Support**: Improved compatibility across different operating systems - -### Documentation Enhancements - -Documentation was enhanced with: - -1. **More Examples**: Added additional examples for common tasks -2. **Workflow Guidance**: Included guidance on typical workflows -3. **Updated Screenshots**: Added visual aids for key operations -4. **Troubleshooting Section**: Created a dedicated troubleshooting guide - -### LLM Optimizations - -LLM integration was optimized with: - -1. **Refined Prompts**: Improved standard prompts based on usage patterns -2. 
**Context Optimization**: Streamlined context information for better generation -3. **Workflow-specific Guidance**: Added LLM guidance tailored to specific workflows -4. **Feedback Incorporation**: Adjusted LLM systems based on user feedback - -## Results - -The process refinement effort significantly improved the STP system, resulting in: - -- More intuitive and efficient user workflows -- Reduced friction in common tasks -- Better error handling and user feedback -- Enhanced cross-platform compatibility -- More comprehensive and accessible documentation -- Improved LLM assistance through better prompts and context - -The refinements have made the system more user-friendly and effective, addressing the key pain points identified through user feedback while maintaining the core functionality and purpose of STP. - -## Related Steel Threads - -- ST0002: Core Script Framework -- ST0004: Steel Thread Commands -- ST0006: Help System -- ST0008: LLM Integration - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/.stp_backup/1.2.1/ST0010.md b/.stp_backup/1.2.1/ST0010.md deleted file mode 100644 index 7349582..0000000 --- a/.stp_backup/1.2.1/ST0010.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: On Hold -created: 20250603 ---- -# ST0010: Anthropic MCP Integration - -- **Status**: On Hold -- **Created**: 2025-06-03 -- **Completed**: -- **Author**: Matthew Sinclair - -## Objective - -Explore and implement the potential use of Anthropic's Machine Control Protocol (MCP) to enable STP scripts to interact with LLMs in a more robust and controlled manner. - -## Context - -Several STP commands could benefit from LLM integration, such as "stp st done STID" and other operations that might require AI assistance. Currently, these interactions are not standardized. Using MCP could provide a more structured and reliable way for STP scripts to leverage LLM capabilities programmatically. - -This steel thread explores whether an MCP implementation (or proxy) could allow STP scripts to more effectively control LLM interactions, potentially enabling the LLM to call itself in a parameterized way from within the STP framework. - -## Approach - -1. Research Anthropic's MCP specification and implementation requirements -2. Identify STP commands that would benefit from LLM integration -3. Design a lightweight MCP integration layer for STP scripts -4. Prototype the integration with at least one STP command (e.g., "stp st done STID") -5. Evaluate effectiveness and limitations -6. 
Document the approach and implementation details - -## Tasks - -- [ ] Research and document current MCP capabilities and limitations -- [ ] Analyze STP commands to identify candidates for MCP integration -- [ ] Design MCP integration architecture for STP -- [ ] Implement prototype for at least one command (e.g., "stp st done") -- [ ] Test the implementation with various scenarios -- [ ] Document the implementation and usage guidelines -- [ ] Create examples of MCP-enhanced STP commands - -## Implementation Notes - -[Notes on implementation details, decisions, challenges, and their resolutions] - -## Results - -[Summary of results after completion, including outcomes, lessons learned, and any follow-up work needed] - -## Related Steel Threads - -- ST0002: Core Script Framework -- ST0004: Steel Thread Commands - -## Context for LLM - -This steel thread explores the integration of Anthropic's Machine Control Protocol (MCP) into the STP system to enhance LLM interactions within STP scripts. This is a low-priority exploration for now that can be implemented later after more critical components are in place. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/.stp_backup/1.2.1/ST0011.md b/.stp_backup/1.2.1/ST0011.md deleted file mode 100644 index 5d29267..0000000 --- a/.stp_backup/1.2.1/ST0011.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Not Started -created: 20250603 -completed: ---- -# ST0011: Test Suite Implementation - -- **Status**: Completed -- **Created**: 2025-06-03 -- **Completed**: 2025-06-03 -- **Author**: Matthew Sinclair - -## Objective - -Build a comprehensive test suite for the STP project to verify that bootstrap, initialization, and other core functions work as expected across different environments and scenarios. - -## Context - -As the STP project grows, it becomes increasingly important to have automated tests to validate that the system works correctly. This steel thread focuses on creating a test framework that can verify the functionality of critical STP components like bootstrap and init scripts, ensuring they behave correctly across different environments and edge cases. - -A robust test suite will help maintain the reliability of STP as new features are added and existing ones are modified, reducing the risk of regressions and making it easier to identify and fix issues early. - -## Approach - -1. Design a test framework that can run in isolated environments -2. Implement tests for core functionality, starting with bootstrap and init -3. Create test fixtures and mock environments as needed -4. Implement test reporting and result analysis -5. Document the test suite and how to run it -6. Integrate with existing workflows - -## Tasks -Tasks are tracked in Backlog. View with: `stp task list ST0011` - -## Implementation Notes - -### Testing Framework Selection - -After researching available testing frameworks for shell scripts, the following options were evaluated: - -1. 
**Bats (Bash Automated Testing System)** - - Pros: TAP-compliant output, well-documented, widely used, supports setup/teardown, good assertion library - - Cons: Requires additional dependency installation - -2. **shUnit2** - - Pros: Pure shell implementation, simple to use, no dependencies - - Cons: Less feature-rich than Bats, less active development - -3. **Assert.sh** - - Pros: Very lightweight, easy to use - - Cons: Limited features, primarily for assertions only - -4. **Roundup** - - Pros: Simple syntax, focused on describing test cases - - Cons: Less active development - -5. **Shell-Spec** - - Pros: BDD-style syntax, good for behavior testing - - Cons: Steeper learning curve, less community adoption - -**Decision**: Bats is the recommended framework for STP testing due to its robust feature set, active development, and widespread adoption. Its TAP output also makes it easy to integrate with CI systems. - -### Test Suite Architecture - -The test suite was implemented with the following structure: - -``` -stp/tests/ -├── README.md # Documentation for the test suite -├── lib/ -│ └── test_helper.bash # Common test helper functions -├── bootstrap/ -│ └── bootstrap_test.bats # Tests for bootstrap script -├── init/ -│ └── init_test.bats # Tests for init command -├── st/ -│ └── st_test.bats # Tests for steel thread commands -├── fixtures/ # Test fixtures and test data -├── run_tests.sh # Script to run all tests -└── setup_test_env.sh # Script to set up the test environment -``` - -The architecture follows these design principles: - -1. **Modularity**: Tests are organized by component being tested -2. **Isolated Environments**: Each test runs in its own temporary directory -3. **Common Test Helpers**: Shared functions are in a central helper file -4. **Comprehensive Coverage**: Tests cover all major functionality -5. 
**Self-Contained**: Setup scripts ensure dependencies are installed - -### Test Helper Implementation - -A comprehensive test helper module was created that provides: - -1. **Environment Setup**: Creates isolated test environments -2. **Custom Assertions**: Specialized assertions for file system operations -3. **Mock Functions**: Ability to mock commands and environment variables -4. **Temporary Directory Management**: Creates and cleans up temporary test directories - -### Test Coverage - -The implemented tests provide coverage for: - -1. **Bootstrap Script**: Tests for directory structure creation, file creation, and author attribution -2. **Init Command**: Tests for project initialization with various parameters and edge cases -3. **Steel Thread Commands**: Tests for creating, listing, showing, and completing steel threads - -### Test Execution and Reporting - -A dedicated `stp/tests/run_tests.sh` script was created that: - -1. Checks for test dependencies -2. Optionally installs missing components -3. Provides colorized output of test results -4. Supports running all tests or specific test suites -5. Generates clear error messages for failed tests - -To run the tests, users must navigate to the tests directory: - -```bash -cd stp/tests/ -./run_tests.sh # Run all tests -./run_tests.sh bootstrap # Run only bootstrap tests -``` - -## Results - -### Current Status (Partial Implementation) - -The test suite has been successfully implemented with the following components: - -1. **Directory Structure**: - - Created an organized test directory structure with separate sections for components - - Implemented a fixtures directory for test data - - Set up a lib directory for shared testing functionality - -2. 
**Test Helper Library**: - - Created a comprehensive test_helper.bash with common functions - - Implemented isolation between tests using temporary directories - - Added custom assertions for file system verification - - Created mock object functionality for testing environmental dependencies - -3. **Component Tests**: - - Implemented bootstrap_test.bats with 11 individual tests for the bootstrap script - - Implemented init_test.bats with 8 individual tests for the init command - - Implemented st_test.bats with 10 individual tests for the steel thread commands - - Implemented help_test.bats with 6 individual tests for the help command - - Implemented main_test.bats with 6 individual tests for the main stp script - -4. **Test Runner**: - - Created run_tests.sh to execute all tests or specific test suites - - Added colorized output for better readability - - Added error reporting and success messages - - Fixed bug to exclude library test files from test runs - -5. **Test Environment Setup**: - - Created setup_test_env.sh to install test dependencies - - Added support for library installation - - Created functionality for adapting to different installation configurations - - Added .gitignore file to exclude test libraries from source control - -### Remaining Work - -The following work is still needed to complete this steel thread: - -1. **Continuous Integration**: - - Set up CI configuration for automated testing - - Create CI workflow definition - - Configure test reporting and notification - -2. **Additional Test Coverage**: - - Add tests for edge cases and error handling - - Create additional tests for LLM integration features - - Add performance tests - -3. **Documentation Updates**: - - Update the technical product design with test suite information - - Create user documentation for running and extending tests - - Document test patterns and best practices - -### Lessons Learned - -1. Bash script testing requires careful isolation of the test environment -2. 
Mocking and simulation are essential for testing filesystem operations -3. A comprehensive test helper library significantly reduces test code duplication -4. Temporary directory management is critical for clean test runs -5. Support for different environments requires flexible path handling -6. Testing interactive scripts requires special handling, like using the `expect` utility -7. String pattern matching in tests needs escaping for special characters (like asterisks) -8. Exclude test library tests from your test runs to avoid conflicts -9. A well-structured .gitignore file helps keep test dependencies out of source control - -## Related Steel Threads - -- ST0001: Directory Structure -- ST0002: Core Script Framework -- ST0005: Initialization Command - -## Context for LLM - -This steel thread focuses on creating a comprehensive test suite for the STP project. Testing shell scripts presents unique challenges, and this work will establish patterns for effective testing of STP components. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/.stp_backup/1.2.1/ST0012.md b/.stp_backup/1.2.1/ST0012.md deleted file mode 100644 index d859cda..0000000 --- a/.stp_backup/1.2.1/ST0012.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 -status: Completed -created: 20250307 -completed: 20250307 ---- -# ST0012: Document Sync Command - -- **Status**: Completed -- **Created**: 2025-03-07 -- **Completed**: 2025-03-07 -- **Author**: Matthew Sinclair - -## Objective - -Create a new `stp st sync` command that will maintain the steel_threads.md document by synchronizing it with the current state of individual steel thread files. - -## Context - -Currently, the `stp/prj/st/steel_threads.md` document needs to be manually kept in sync with the individual ST####.md files. The `stp st list` command now reads directly from the ST files, but the summary document needs to be updated separately. - -This causes inconsistencies when steel thread status changes or when new steel threads are added. A mechanism is needed to ensure the summary document accurately reflects the current state of all steel threads. - -## Approach - -1. Create a new `sync` option in the `stp_st` script -2. Add support for section markers in the steel_threads.md document -3. Read metadata from each ST####.md file -4. Generate updated content for the marked sections in steel_threads.md -5. 
Either output the updated content to stdout or write it to the file based on options - -## Tasks - -- [x] Add section markers to the steel_threads.md document -- [x] Create a new `sync` subcommand in the `stp_st` script -- [x] Implement logic to extract metadata from ST files -- [x] Add functionality to update sections between markers -- [x] Add `--write` option for file updates -- [x] Add `--width` option for configurable table formatting -- [x] Add documentation for the new command -- [x] Update tests to ensure proper functionality - -## Implementation Notes - -The implementation uses section markers in the form of HTML-like comments to identify the parts of the document that should be updated: - -``` -<!-- BEGIN: STEEL_THREAD_INDEX --> -(content will be replaced during sync) -<!-- END: STEEL_THREAD_INDEX --> -``` - -This allows for automatic updates to specific sections while preserving the rest of the document. - -### Key Implementation Details - -1. **New `sync` Command**: Added a new `sync` subcommand to the `stp_st` script that reads the individual steel thread files and updates the steel_threads.md document. - -2. **Metadata Extraction**: The command extracts metadata (status, created date, completed date) from individual ST####.md files using both: - - Metadata in the YAML frontmatter (with keys `status`, `created`, and `completed`) - - Information in the document body (with lines like `- **Status**: In Progress`) - -3. **Section Markers**: Added HTML-style comment markers to designate sections in the steel_threads.md file that can be automatically updated. - -4. **Configurable Output**: Added options to customize the output: - - `--write`: Updates the steel_threads.md file directly - - `--width N`: Specifies the width of the output table for proper column alignment - -5. **Format Consistency**: Ensures proper table formatting and column alignment for better readability. 
- -This approach makes the system more maintainable by ensuring the steel_threads.md document stays in sync with the individual ST files, reducing manual maintenance work. - -## Results - -The Document Sync Command feature was successfully implemented, providing the following benefits: - -1. **Automated Consistency**: The steel_threads.md document is now automatically kept in sync with the individual steel thread files, eliminating manual updates and ensuring consistency. - -2. **Configurable Output**: The command supports customizable table widths to ensure proper formatting for both terminal output and document integration. - -3. **Metadata Support**: The implementation handles both YAML frontmatter and document body metadata, providing flexibility in how steel thread information is stored. - -4. **Non-Destructive Updates**: The section marker approach allows for updating specific parts of the document while preserving manually edited sections. - -5. **Comprehensive Tests**: Added test cases ensure the feature works correctly and will continue to function after future changes. - -The sync command provides a significant improvement in the maintainability of the STP documentation system by automating what was previously a manual process. This reduces the likelihood of documentation inconsistencies and saves time when managing steel threads. - -## Related Steel Threads - -- [List any related steel threads here] - -## Context for LLM - -This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. - -### How to update this document - -1. Update the status as work progresses -2. Check off tasks as they are completed -3. Add implementation notes as decisions are made or challenges encountered -4. Add results when the steel thread is completed -5. 
Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. diff --git a/.stp_backup/1.2.1/ST0013.md b/.stp_backup/1.2.1/ST0013.md deleted file mode 100644 index 9382ba7..0000000 --- a/.stp_backup/1.2.1/ST0013.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -verblock: "20 Mar 2025:v0.1: Matthew Sinclair - Updated via STP upgrade" -stp_version: 1.2.0 -status: Completed -created: 20250311 -completed: 20250708 ---- -# ST0013: STP Blog Post Series - -- **Status**: In Progress -- **Created**: 2025-03-11 -- **Completed**: -- **Author**: Matthew Sinclair - -## Summary - -Create a series of blog posts about the Steel Thread Process (STP) to explain its purpose, philosophy, and implementation. The blog posts will highlight how STP improves software development, particularly when working with LLMs, and discuss the importance of capturing intent in the development process. - -## Objectives - -1. Explain the STP concept and its benefits to a wider audience -2. Demonstrate how STP enhances collaboration between developers and LLMs -3. Discuss the importance of capturing intent in software development -4. Provide practical examples of STP in action -5. 
Create a comprehensive yet concise overview of the STP methodology - -## Blog Post Series Plan - -### Post 0: Motivation for STP - -- The current challenges in software development documentation -- Why existing approaches fall short -- The vision behind creating STP -- Setting the stage for the series - -### Post 1: Introduction to STP - -- Overview of what STP is and why it was created -- The problem STP solves in modern software development -- Core principles and philosophy behind STP -- Brief explanation of steel threads as a development approach -- Introduction to the upcoming post series - -### Post 2: The Steel Thread Methodology - -- Detailed explanation of what a steel thread is -- How steel threads differ from traditional development methods -- Benefits of the steel thread approach -- Examples of effective steel thread implementation -- Managing work using steel threads - -### Post 3: Intent Capture in Software Development - -- The challenge of capturing and preserving intent in development -- How STP addresses the intent capture problem -- The importance of intent when working with LLMs -- Structured documentation as a vehicle for intent -- Examples of intent documentation in STP - -### Post 4: LLM Collaboration with STP - -- How STP is specifically designed for LLM collaboration -- Context management strategies in STP -- Templates and structure that enhance LLM effectiveness -- The feedback loop between documentation and implementation -- Future opportunities for LLM integration in STP - -### Post 5: Getting Started with STP - -- Installation and setup instructions -- Basic commands and workflow -- Integrating STP into existing projects -- Common patterns and best practices -- Resources for further learning - -### Post 6: Next Steps and Future Work - -- Current state of STP and lessons learned -- Planned enhancements and feature roadmap -- Integration opportunities with other tools and systems -- Advanced LLM collaboration features -- Community 
contributions and long-term vision - -## Implementation Plan - -1. Draft each blog post in the series -2. Create diagrams and visuals to illustrate key concepts -3. Include code examples and practical demonstrations -4. Ensure consistent terminology and messaging across all posts -5. Optimize content for readability and accessibility - -All blog posts will be stored in the `/stp/doc/blog` directory and will be formatted in Markdown. - -## Success Criteria - -- Complete series of 7 high-quality blog posts -- Clear explanation of STP concepts, especially intent capture -- Practical examples that demonstrate the value of STP -- Logical progression of topics across the series -- Content that is accessible to developers of various experience levels - -## Notes - -- Each post should be approximately 1000-1500 words -- Include diagrams where appropriate to illustrate concepts (text-based ASCII diagrams are preferred) -- The posts should balance theory and practical application -- Focus on the unique aspects of STP, particularly intent capture and LLM collaboration -- Document the process of writing these blog posts using STP/Backlog itself (meta-documentation) diff --git a/.stp_backup/1.2.1/ST0014.md b/.stp_backup/1.2.1/ST0014.md deleted file mode 100644 index ce84b62..0000000 --- a/.stp_backup/1.2.1/ST0014.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -verblock: "20 Mar 2025:v0.1: Claude - Initial version" -stp_version: 1.2.0 -status: In Progress -created: 20250320 -completed: ---- -# ST0014: Directory Structure for Steel Threads - -- **Status**: In Progress -- **Created**: 2025-03-20 -- **Completed**: -- **Author**: Claude - -## Objective -Implement directory structure for steel threads based on their status to better organize the project. - -## Context -Steel threads are currently all stored in the same directory, making it difficult to quickly identify their status. 
By organizing them into separate directories based on status, we can improve the organization and make it easier to find specific types of steel threads. - -## Approach -1. Create subdirectories based on status (COMPLETED, NOT-STARTED, CANCELLED) -2. Modify the ST commands to handle the new directory structure -3. Update the upgrade script to organize files during upgrades -4. Update documentation to reflect the new structure - -## Tasks -Tasks are tracked in Backlog. View with: `stp task list ST0014` - -## Implementation Notes -The implementation includes: -- Directory structure: COMPLETED, NOT-STARTED, CANCELLED directories -- Files in IN-PROGRESS and ON-HOLD status stay in the main directory -- The organize_st.sh script handles moving files to their appropriate directories -- The upgrade script runs organize_st.sh to ensure files are in the right places - -## Results -[To be completed after implementation] - -## Related Steel Threads -- ST0004: Steel Thread Commands diff --git a/AGENTS.md b/AGENTS.md new file mode 120000 index 0000000..4cfe032 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1 @@ +intent/llm/AGENTS.md \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 52fabcd..f96234e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,12 +5,247 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [2.3.2] - 2025-09-04 + +### Added + +- Comprehensive antipattern detection to Elixir subagent + - Detects and remediates 24 common Elixir antipatterns + - Antipatterns categorized into Code (9), Design (6), Process (4), and Meta-programming (5) + - Full documentation in `intent/plugins/claude/subagents/elixir/antipatterns.md` + - Antipatterns sourced from official Elixir documentation +- Antipattern review workflow integrated into Elixir Doctor +- Example usage commands and report formats for antipattern detection +- Key principles for antipattern prevention + +### Changed + +- Enhanced Elixir subagent with antipattern detection capabilities +- Updated systematic review template to include antipattern analysis +- Elixir Doctor now automatically checks for antipatterns during code reviews + +### Technical Improvements + +- Better code quality guidance through antipattern detection +- More comprehensive code review process +- Proactive detection of common Elixir mistakes + +## [2.3.1] - 2025-08-29 + +### Added + +- Worker-bee agent for Worker-Bee Driven Design (WDD) in Elixir applications +- Resources directory structure for agents with templates and Mix tasks +- Worker-bee agent includes comprehensive WDD validation and scaffolding tools + +### Changed + +- Enhanced agent system to support resource directories +- Improved subagent installation and management + +## [2.3.0] - 2025-08-20 + +### Added + +- Plugin architecture for Intent +- Claude subagents system (renamed from agents) +- AGENTS.md universal AI agent instructions +- Support for multiple AI platforms through AGENTS.md +- New `intent agents` commands for AGENTS.md management +- New `intent claude subagents` commands (replacing old `intent agents`) + +### Changed + +- Renamed `intent agents` commands to `intent claude subagents` +- Moved subagents to `intent/plugins/claude/subagents/` +- Updated project structure to support plugins + +### Technical Improvements + +- More flexible agent system 
architecture +- Better separation of concerns with plugin system +- Universal agent instructions format + +## [2.2.1] - 2025-08-11 + +### Added + +- Centralized version management through VERSION file +- `get_intent_version()` function in intent_helpers for consistent version retrieval +- Comprehensive tool dependency checking in `intent doctor` +- Platform-specific installation instructions for all required tools +- Better error handling for missing jq dependency across all commands + +### Changed + +- Steel threads now start with 'WIP' status instead of 'In Progress' when using `intent st start` +- Tool dependencies categorized as required, core, and optional in doctor command +- Enhanced jq error messages with clear installation instructions +- All scripts now read version from centralized VERSION file + +### Fixed + +- `intent upgrade` now preserves existing CLAUDE.md files instead of overwriting them +- Silent failures when jq is missing during agent operations +- Missing error messages for required tool dependencies +- Inadequate installation guidance for different platforms +- Version number inconsistencies across different scripts + +### Technical Improvements + +- Single source of truth for version management +- Reduced maintenance overhead for version updates +- Improved fallback behavior when tools are missing +- Better user experience with actionable error messages + +## [2.2.0] - 2025-08-05 + +### Added + +- `intent fileindex` command for systematic file tracking and progress management +- Check functionality (`-C` flag) to explicitly mark files as checked [x] in the index +- Uncheck functionality (`-U` flag) to explicitly mark files as unchecked [ ] in the index +- Toggle functionality (`-X` flag) to switch files between checked/unchecked states +- Flexible operation modes - works both within Intent projects and standalone +- Enhanced Elixir agent with systematic code review workflow using fileindex +- Support for both Elixir module names and 
filesystem paths in the Elixir agent +- Comprehensive test suite for fileindex command (47 tests including check/uncheck) +- Demo mode (`--demo`) to showcase fileindex functionality + +### Changed + +- Updated all version references from 2.1.0 to 2.2.0 +- Enhanced `intent upgrade` to support 2.1.0 → 2.2.0 migrations +- Improved upgrade path handling for incremental version upgrades +- Updated Elixir agent documentation with systematic review workflow +- Added fileindex to global commands list + +### Fixed + +- Bash compatibility issues on macOS (associative arrays, readarray command) +- Local variable declarations at global scope in shell scripts +- Missing `assert_output` function in test framework +- Test expectations for error messages + +### Technical Improvements + +- Replaced bash associative arrays with parallel arrays for macOS compatibility +- Replaced `readarray` with portable while loops +- Added proper error handling for edge cases in file operations +- Enhanced test helper with assert_output function + +## [2.1.0] - 2025-07-27 + +### Added + +- `intent agents init` command to initialize agent configuration +- Support for upgrading from Intent v2.0.0 to v2.1.0 +- Enhanced agent manifest management with proper initialization +- Improved agent setup workflow with explicit initialization step + +### Changed + +- Updated all version references from 2.0.0 to 2.1.0 +- Enhanced `intent upgrade` to support 2.0.0 → 2.1.0 migrations +- Improved agent installation workflow to require initialization first +- Updated documentation to reflect v2.1.0 features + +### Fixed + +- Agent directories not being properly created during upgrade +- Missing agent initialization when upgrading from older versions +- Agent manifest not being created in fresh installations +- Incorrect creation of `agents/` directory at project root instead of `intent/agents/` +- Upgrade process incorrectly preserving root-level agent directories + +## [2.0.0] - 2025-07-17 + +### Added + +- New 
`intent` command as the primary CLI (replacing `stp`) +- `intent bootstrap` command for easy global setup +- `intent doctor` command for comprehensive diagnostics +- `intent st repair` command to fix malformed steel thread metadata +- JSON-based configuration system (local and global) +- Full backwards compatibility with STP v1.x projects +- Comprehensive test suite with GitHub Actions CI/CD +- Example projects demonstrating migration paths +- Support for `jq` dependency in workflows +- **Claude Code Sub-Agent Integration**: Complete agent management system + - `intent agents` command suite (list, install, sync, uninstall, show, status) + - Intent agent with steel thread methodology knowledge + - Elixir agent with Usage Rules and Ash/Phoenix patterns + - Global and project-specific agent support + - Manifest-based tracking with checksum integrity + - Seamless integration with intent init, doctor, and upgrade commands + +### Changed + +- **BREAKING**: Renamed from STP to Intent +- **BREAKING**: Flattened directory structure (intent/ instead of stp/prj/) +- **BREAKING**: Executables moved to top-level bin/ directory +- **BREAKING**: Configuration format changed from YAML to JSON +- Improved error messages and user feedback +- Enhanced migration tools with fail-forward approach +- Streamlined command structure and naming +- Updated all documentation to reflect Intent branding + +### Fixed + +- GitHub Actions workflow issues with bats libraries +- Symlink issues with stp compatibility command +- Test suite reliability and coverage +- Configuration loading hierarchy +- Path resolution in various environments +- Malformed YAML frontmatter in steel threads after migration +- Legacy field names (stp_version) in steel thread metadata +- Conflicting status values between frontmatter and body content + +### Deprecated + +- `stp` command (now aliases to `intent` for compatibility) +- Old directory structure (stp/prj/st/ → intent/st/) +- YAML configuration format +- Nested 
project directory structure + +### Migration Guide + +#### From STP v1.x to Intent v2.0.0 + +1. **Automatic Migration**: Run `intent upgrade` to automatically migrate your project +2. **Manual Installation**: + + ```bash + # Clone Intent repository + git clone https://github.com/matthewsinclair/intent.git + cd intent + + # Add to PATH + export PATH="$PATH:$(pwd)/bin" + + # Bootstrap global configuration + intent bootstrap + ``` + +3. **Project Structure Changes**: + - `stp/prj/st/` → `intent/st/` + - `stp/prj/wip.md` → `intent/wip.md` + - `stp/eng/` → `intent/eng/` + - `stp/usr/` → `intent/usr/` + +4. **Command Changes**: + - All `stp` commands now use `intent` + - Same subcommands and options supported + - `stp` symlink provided for compatibility + +See [Release Notes](./docs/releases/2.0.0/RELEASE_NOTES.md) for complete details. + ## [1.2.1] - 2025-07-09 ### Added - Directory-based structure for steel threads (replacing single files) -- New steel thread file types: `info.md`, `design.md`, `impl.md`, `tasks.md`, `results.md` +- New steel thread file types: `info.md`, `design.md`, `impl.md`, `tasks.md` - Migration script `migrate_st_to_dirs` for upgrading from v1.2.0 to v1.2.1 - Support for editing/viewing specific steel thread files with `stp st show/edit <id> <file>` - `stp st show <id> all` command to view all steel thread files at once @@ -39,7 +274,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 1. Run `stp upgrade` - it will detect old-format steel threads and offer to migrate them 2. The migration will: - - Create a backup in `.stp_backup/1.2.1/` + - Create a backup in `.backup/1.2.1/` - Create directories for each steel thread (e.g., `ST0001/`) - Split content into separate files based on sections - Preserve all existing content and metadata @@ -97,7 +332,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 1. Use `stp llm usage_rules` to display usage patterns 2. 
Create symlinks with `stp llm usage_rules --symlink` for projects expecting usage-rules.md -3. Reference the usage rules documentation at `stp/eng/usage-rules.md` +3. Reference the usage rules documentation at `intent/llm/usage-rules.md` ## [1.0.0] - 2025-06-03 @@ -122,5 +357,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `stp upgrade` - Upgrade STP files to latest format - `stp help` - Comprehensive help system -[1.2.0]: https://github.com/matthewsinclair/stp/compare/v1.0.0...v1.2.0 -[1.0.0]: https://github.com/matthewsinclair/stp/releases/tag/v1.0.0 +[2.2.1]: https://github.com/matthewsinclair/intent/compare/v2.2.0...v2.2.1 +[2.2.0]: https://github.com/matthewsinclair/intent/compare/v2.1.0...v2.2.0 +[2.1.0]: https://github.com/matthewsinclair/intent/compare/v2.0.0...v2.1.0 +[2.0.0]: https://github.com/matthewsinclair/intent/compare/v1.2.1...v2.0.0 +[1.2.1]: https://github.com/matthewsinclair/intent/compare/v1.2.0...v1.2.1 +[1.2.0]: https://github.com/matthewsinclair/intent/compare/v1.0.0...v1.2.0 +[1.0.0]: https://github.com/matthewsinclair/intent/releases/tag/v1.0.0 diff --git a/CLAUDE.md b/CLAUDE.md index 07d205a..3780c99 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,120 +1,142 @@ -# STP Project Guidelines +# . Project Guidelines -## STP Version +This is an Intent v2.2.0 project (formerly STP). -The current STP version is 1.2.1. All STP files should include a `stp_version` field in their YAML frontmatter. To update files, run `stp upgrade`. 
+## Project Structure -### Steel Thread Structure (v1.2.1+) +- `intent/` - Project artifacts (steel threads, docs, work tracking) + - `st/` - Steel threads organized as directories + - `docs/` - Technical documentation + - `llm/` - LLM-specific guidelines +- `backlog/` - Task management (if using Backlog.md) +- `.intent/` - Configuration and metadata + +## Steel Threads + +Steel threads are organized as directories under `intent/st/`: + +- Each steel thread has its own directory (e.g., ST0001/) +- Minimum required file is `info.md` with metadata +- Optional files: design.md, impl.md, tasks.md + +## Commands + +### Core Commands +- `intent st new "Title"` - Create a new steel thread +- `intent st list` - List all steel threads +- `intent st show <id>` - Show steel thread details +- `intent doctor` - Check configuration +- `intent help` - Get help + +### AGENTS.md Commands (NEW in v2.3.0) +- `intent agents init` - Create AGENTS.md for the project +- `intent agents sync` - Update AGENTS.md with latest project state +- `intent agents validate` - Check AGENTS.md compliance + +### Claude Subagent Commands (renamed in v2.3.0) +- `intent claude subagents init` - Initialize Claude subagent configuration +- `intent claude subagents list` - List available Claude subagents +- `intent claude subagents install <name>` - Install a Claude subagent + +## Migration Notes + +This project was migrated from STP to Intent v2.0.0 on 2025-07-16, upgraded to v2.1.0 on 2025-07-27, upgraded to v2.2.0 on 2025-08-05, and upgraded to v2.3.0 on 2025-08-20 with plugin architecture and AGENTS.md support. + +- Old structure: `stp/prj/st/`, `stp/eng/`, etc. +- New structure: `intent/st/`, `intent/docs/`, etc. +- Configuration moved from YAML to JSON format + +## Intent Agents + +This project has access to specialized AI agents through Intent's agent system. These agents are Claude Code sub-agents with domain-specific expertise. + +### Available Agents + +1. 
**intent** - Intent methodology specialist + - Steel thread management and best practices + - Backlog task tracking + - Intent command usage and workflows + - Project structure guidance + +2. **elixir** - Elixir code doctor + - Functional programming patterns + - Elixir Usage Rules and best practices + - Ash and Phoenix framework expertise + - Code review and optimization + +3. **socrates** - CTO Review Mode + - Technical decision-making via Socratic dialog + - Architecture review and analysis + - Strategic technology choices + - Risk assessment and mitigation + +4. **worker-bee** - Worker-Bee Driven Design specialist + - WDD 6-layer architecture enforcement + - Project structure mapping and validation + - Code scaffolding with templates + - Mix task generation for WDD compliance + +### Using Agents + +To delegate tasks to specialized agents, use the Task tool with the appropriate subagent_type: -Starting with STP v1.2.1, steel threads are organized as directories: ``` -stp/prj/st/ -├── ST0001/ -│ ├── info.md # Metadata, objective, context (required) -│ ├── design.md # Design decisions and approach -│ ├── impl.md # Implementation details -│ ├── tasks.md # Task tracking -│ └── results.md # Results and outcomes -└── ST0002/ - └── info.md # Minimum required file +Task( + description="Review Elixir code", + prompt="Review the authentication module for Usage Rules compliance", + subagent_type="elixir" +) ``` -### ST File Metadata Format - -The `info.md` file must have consistent metadata in this format: -```yaml ---- -verblock: "DD MMM YYYY:v0.1: Author Name - Initial version" -stp_version: 1.2.1 -status: Not Started|In Progress|Completed|On Hold|Cancelled -created: YYYYMMDD -completed: YYYYMMDD ---- -``` +### When to Use Agents -Only keep one verblock entry with the most recent change (don't accumulate verblock history, as this is available in git). 
- -## Project Documentation - -- **IMPORTANT**: Always read `stp/eng/tpd/technical_product_design.md` at the start of a new session -- This document contains comprehensive information about the project vision, architecture, and current state -- The "Preamble to Claude" section at the top is specifically designed to give Claude sessions a complete understanding of the project -- When making significant changes, update this document to keep it in sync with the implementation -- When suggesting improvements, reference and respect the architectural patterns described in this document - -- **USAGE PATTERNS**: For detailed guidance on using STP commands and workflows, see `stp/eng/usage-rules.md` -- This document provides patterns and best practices for working with STP, designed specifically for LLM understanding -- It covers command usage, workflows, and integration patterns -- You can also display this document using: `stp llm usage_rules` - -- **NEXT**: Work is coordinated through _STEEL THREADS_ -- Use the `stp st list` command to get a dynamic list of all steel threads and their status -- Use `stp st show <id>` to view details of specific steel threads -- When analyzing the project, prefer using STP commands instead of directly reading files when appropriate - -- **WIP**: Is what we are doing _now_ -- Look in `stp/prj/wip.md` to find out what is currently on the go -- This document contains the current tasks in progress for each day. 
- -- **HISTORY**: Historical tracking is now maintained through Backlog tasks -- Use `stp bl list` and `stp bl task show` to review completed work -- Steel threads capture high-level context and decisions - -## Task Management with Backlog.md - -STP is integrated with Backlog.md for fine-grained task tracking: - -- **Steel Threads**: Continue to capture high-level intent, design, and implementation documentation -- **Backlog Tasks**: Track individual implementation tasks with rich metadata (status, priority, dependencies) -- **STP Backlog Wrapper** (`stp backlog` or `stp bl`): - - `stp bl init` - Initialize backlog with STP-friendly settings - - `stp bl list` - List all tasks without git errors - - `stp bl create <ST####> <title>` - Create a task linked to a steel thread - - `stp bl board` - View Kanban board -- **Helper Commands**: - - `stp task create <ST####> <title>` - Create a task linked to a steel thread - - `stp task list <ST####>` - List all tasks for a steel thread - - `stp status show <ST####>` - Show steel thread and task status summary - - `stp migrate <ST####>` - Migrate embedded tasks from steel thread to Backlog - -### Task Naming Convention -Tasks linked to steel threads follow the pattern: `ST#### - <task description>` - -### Workflow -1. Create steel thread with `stp st new` for intent capture -2. Create associated tasks with `stp bl create` or `stp task create` -3. Track progress with `stp bl board` or `stp task list` -4. 
Update steel thread status based on task completion - -### Quick Start with Backlog -```bash -# Initialize backlog in your project -stp bl init - -# Create a task -stp bl create ST0014 "Fix validation bug" - -# List all tasks -stp bl list -``` +**Use the intent agent for:** + +- Creating or managing steel threads +- Understanding Intent project structure +- Working with backlog tasks +- Following Intent best practices + +**Use the elixir agent for:** + +- Writing idiomatic Elixir code +- Reviewing code for Usage Rules +- Ash/Phoenix implementation guidance +- Functional programming patterns + +**Use the socrates agent for:** + +- Technical architecture reviews +- Strategic technology decisions +- Risk assessment for technical choices +- Facilitating thoughtful technical discussions + +**Use the worker-bee agent for:** + +- Enforcing Worker-Bee Driven Design principles +- Mapping project structure to WDD layers +- Validating WDD compliance +- Scaffolding WDD-compliant code +- Generating Mix tasks for WDD workflows + +**Use main Claude for:** + +- General programming tasks +- Cross-cutting concerns +- Integration between systems +- Tasks requiring broad context + +### Best Practices + +1. Delegate specialized tasks to appropriate agents +2. Provide clear, focused prompts to agents +3. Agents work best with specific, bounded tasks +4. 
Consider using multiple agents for complex workflows + +## Author + +matts + +## Usage Rules -For detailed integration guide, see: `stp/usr/reference_guide.md#backlogmd-integration` - -## Code Style Guidelines - -- For Elixir code: - - Use `@moduledoc` and `@doc` with examples for all modules and public functions - - Add type specs for public functions with `@spec` - - Format with: `mix format` - - Use snake_case for variables, functions, and modules - - Use 2-space indentation (standard Elixir style) - - Group related functions together; public functions first, private after - - Handle errors with pattern matching or explicit `{:ok, result}` / `{:error, reason}` tuples - - Use descriptive variable names - avoid single-letter names except in very short callbacks - - All functions should have clear, defined purposes with no side effects - - Prefer pipe operators (`|>`) for data transformations - - Use doctest examples in documentation to provide test coverage - - When possible, make functions pure and stateless -- In general: - - Use 2-space indentation in any programming language - - DO NOT ADD: "🤖 Generated with [Claude Code](https://claude.ai/code)" or "Co-Authored-By: Claude <noreply@anthropic.com>")" to git commit messages +- DO NOT ADD CLAUDE TO GIT COMMITS. EVER. \ No newline at end of file diff --git a/DEPRECATIONS.md b/DEPRECATIONS.md index 13ca80b..3d542a3 100644 --- a/DEPRECATIONS.md +++ b/DEPRECATIONS.md @@ -1,36 +1,112 @@ --- -verblock: "09 Jul 2025:v0.1: Matthew Sinclair - Initial deprecations document" -stp_version: 1.2.0 +verblock: "27 Jul 2025:v0.3: Matthew Sinclair - Updated for Intent v2.1.0" +intent_version: 2.1.0 --- -# STP Deprecations +# Intent Deprecations -This document tracks features, files, and functionality that have been deprecated in STP. +This document tracks features, files, and functionality that have been deprecated in Intent (formerly STP). 
## July 9, 2025: journal.md ### What was deprecated + The `stp/prj/journal.md` file and associated functionality. ### Why it was deprecated + With the integration of Backlog.md for task management, the journal.md file became redundant. Backlog provides: + - Better structured task tracking with metadata (status, priority, dependencies) - Automatic linking to steel threads - More flexible historical tracking through task history - Integration with modern development workflows ### Migration path + Users who were using journal.md for historical tracking should: + 1. Use `stp bl list` to view task history 2. Use steel thread documents for high-level context and decisions 3. Use Backlog task descriptions for detailed implementation notes ### Where to find the deprecated content + The original journal.md file has been archived at `stp/prj/archive/journal-deprecated.md` with a deprecation notice. ### Impact + - The `stp init` command no longer creates journal.md - Documentation has been updated to reference Backlog for historical tracking - The journal.md template has been removed from `_templ/prj/` ### Version deprecated -STP version 1.0.0 \ No newline at end of file + +STP version 1.0.0 + +## July 17, 2025: STP → Intent Rebrand + +### What was deprecated + +The entire STP (Steel Thread Process) command and naming convention has been deprecated in favour of Intent. + +### Why it was deprecated + +The name "Intent" better reflects the tool's core purpose of capturing and preserving the intention behind software development decisions. The rebrand includes: + +- Better alignment with the tool's philosophy +- Clearer separation between tool and methodology +- Improved directory structure with flattened hierarchy +- Modern JSON-based configuration system + +### Migration path + +Users migrating from STP to Intent should: + +1. Run `intent upgrade` to automatically migrate existing projects +2. Update PATH to point to the new bin/ directory +3. 
Use `intent` command instead of `stp` (symlink provided for compatibility) +4. Update any scripts or documentation referencing `stp` commands + +### Specific deprecations + +#### Commands + +- `stp` → `intent` (all subcommands remain the same) +- `stp init` → `intent init` +- `stp st` → `intent st` +- `stp bl` → `intent bl` +- `stp task` → `intent task` +- `stp status` → `intent status` +- `stp migrate` → `intent migrate` +- `stp upgrade` → `intent upgrade` + +#### Directory Structure + +- `stp/prj/st/` → `intent/st/` +- `stp/prj/wip.md` → `intent/wip.md` +- `stp/eng/` → `intent/eng/` +- `stp/usr/` → `intent/usr/` +- `stp/bin/` → `bin/` (moved to top level) + +#### Configuration + +- YAML format → JSON format +- `.stp/config.yml` → `.intent/config.json` +- No global config → `~/.config/intent/config.json` (XDG standard) + +### Where to find deprecated content + +- Original STP executables remain in the repository for reference +- Migration is handled automatically by `intent upgrade` +- Backwards compatibility maintained through symlinks + +### Impact + +- All new projects should use Intent commands and structure +- Existing projects can continue using `stp` via compatibility symlink +- Documentation has been updated to use Intent terminology +- Repository renamed from `stp` to `intent` + +### Version deprecated + +Intent version 2.1.0 diff --git a/README.md b/README.md index 5593476..01b3bd0 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,173 @@ -# Steel Thread Process (STP) +# Intent: Build Software with AI by Capturing WHY Code Exists -[![STP Tests](https://github.com/matthewsinclair/dev-stp/actions/workflows/tests.yml/badge.svg)](https://github.com/matthewsinclair/dev-stp/actions/workflows/tests.yml) +[![Intent Tests](https://github.com/matthewsinclair/intent/actions/workflows/tests.yml/badge.svg)](https://github.com/matthewsinclair/intent/actions/workflows/tests.yml) -> **Transform how you build software by capturing the "why" alongside the "what"** +Intent 
helps you build better software by capturing the "why" behind your code. When you document intentions, both your team and AI assistants understand not just what the code does, but why it exists. -STP is a lightweight, intention-aware development methodology that helps you build better software by preserving the context and reasoning behind every decision. It's designed from the ground up to enhance collaboration between developers and AI assistants, making your development process more efficient and your codebase more maintainable. +## 🚀 See Intent in Action -## 🎯 The Problem STP Solves +Instead of giving your AI assistant vague instructions: -Ever joined a project and wondered: -- Why was this approach chosen over alternatives? -- What problem was this code originally solving? -- What were the trade-offs considered? -- Why did we structure it this way? +```bash +# ❌ Without Intent: +"Build a cache system for our API" +# AI builds generic cache, misses critical requirements +``` + +Capture your actual intention: + +```bash +# ✅ With Intent: +$ intent st new "Implement rate-limited cache for API protection" +# Document: Need cache because API limits to 100 req/min +# Document: Must handle Black Friday traffic spikes (10K req/s) +# AI builds appropriate solution with rate limiting and burst handling +``` + +**Result**: Your AI assistant understands the constraints and builds the right solution first time. + +## 💡 Why This Matters + +### For Solo Developers + +**Problem**: Your AI assistant forgets context between sessions +**Solution**: Intent preserves your project's "why" so AI always understands your goals + +### For Teams + +**Problem**: New members waste weeks doing "code archaeology" +**Solution**: Every feature has a Steel Thread documenting why it exists + +### For Future You + +**Problem**: "Why did I write this weird code 6 months ago?" +**Solution**: Your past self documented the API limits that forced that approach + +## 🎯 What is a Steel Thread? 
+ +A **Steel Thread** is a self-contained feature with documented intentions. Think of it as a container that holds: + +- **WHY** you're building something (the intention) +- **WHAT** you're building (the design) +- **HOW** you're building it (the tasks) + +Example structure: + +``` +ST0042: Authentication System/ +├── info.md # Why we need auth, what type, constraints +├── design.md # JWT vs sessions decision, security model +├── impl.md # Technical implementation details +└── tasks.md # Linked Backlog tasks for execution +``` + +## 🤖 Intent + LLM in Action + +### Example 1: Context Persistence + +```markdown +# ❌ Without Intent (every new session): +You: "Help me optimize the user service" +LLM: "What does the user service do? What are the constraints?" +[You spend 10 minutes explaining...] -Traditional documentation captures _what_ the code does, but rarely preserves _why_ it exists. This context loss leads to: -- 🔄 Repeated mistakes and circular discussions -- 🤔 Confusion about design decisions -- 🚫 Fear of changing "mysterious" code -- 🤖 Poor AI assistance due to missing context +# ✅ With Intent: +You: "I'm working on ST0042" [paste steel thread] +LLM: "I see you're using JWT tokens with 15-min expiry for stateless auth. + Given your multi-device requirement, here's a refresh token strategy..." +``` + +### Example 2: Discovering Hidden Knowledge + +```bash +# Months later, you wonder: "Can I simplify this cache?" +$ intent st show ST0015 +# Reveals: "Cache exists because API rate limits to 100 req/min" +# Now you know why it's "complex" - it's handling burst traffic! 
+``` + +### Example 3: Focused AI Assistance + +```markdown +# Steel threads keep AI focused: +- Clear boundaries (one feature, not entire codebase) +- Explicit constraints documented ("must handle 10K req/s") +- Design decisions captured ("chose Redis over Memcached because...") +- Result: AI suggestions align with YOUR architecture +``` + +## 🤖 Claude Code Integration + +Intent v2.2.0 integrates with [Claude Code](https://claude.ai/code) sub-agents to supercharge AI collaboration: + +```bash +# Initialize agent configuration (one-time setup) +$ intent agents init -## 💡 The STP Solution +# Install the Intent agent +$ intent agents install intent -STP introduces **Steel Threads** - self-contained units of work that capture not just tasks, but the entire context of why work is being done. Combined with **Backlog.md** for task management, STP creates a two-tier system that preserves both strategic intent and tactical execution. +# Now Claude automatically understands: +# ✓ Steel thread methodology +# ✓ All Intent commands +# ✓ Your project structure +# ✓ Best practices +``` + +**The difference is dramatic:** + +Without Intent agent: + +``` +You: "Help me add caching" +Claude: "What's your project structure? What caching do you need?" +[10 minutes explaining Intent, constraints, etc.] +``` -### Key Benefits +With Intent agent: + +``` +You: "Help me add caching" +Claude: "I'll create a steel thread for caching: + + intent st new 'Implement caching layer' + + Let's document the intent first - what are you caching? + Is this for API rate limits or performance? What's your + expected traffic pattern? I'll help structure this properly." +``` + +Claude becomes an Intent-fluent development partner from day one. + +## 🎯 What is Backlog.md? 
-- **📝 Never Lose Context**: Every decision is documented with its reasoning -- **🤖 AI-Ready**: LLMs understand your project deeply, providing better assistance -- **👥 Team Continuity**: New developers understand the "why" immediately -- **🔍 Traceable Decisions**: See the evolution of your project's thinking -- **🚀 Faster Development**: Less time explaining, more time building +[Backlog.md](https://github.com/backlog/backlog) is a Git-native task manager that lives in your repository as markdown files. Intent integrates with it to create a two-tier system: + +- **Steel Threads** (Intent): High-level features with documented "why" +- **Tasks** (Backlog.md): Day-to-day work items that implement the "how" + +```bash +# Create a steel thread for the big picture +$ intent st new "Add user authentication" +Created: ST0001 + +# Break it down into specific tasks +$ intent bl create ST0001 "Research auth libraries" +$ intent bl create ST0001 "Implement login endpoint" +$ intent bl create ST0001 "Add password reset flow" + +# View your work visually +$ intent bl board +┌────────────────┬────────────────┬────────────────┐ +│ TODO │ IN PROGRESS │ DONE │ +├────────────────┼────────────────┼────────────────┤ +│ Implement │ Research auth │ │ +│ login endpoint │ libraries │ │ +│ │ │ │ +│ Add password │ │ │ +│ reset flow │ │ │ +└────────────────┴────────────────┴────────────────┘ +``` ## 🚀 Quick Start @@ -38,130 +175,181 @@ STP introduces **Steel Threads** - self-contained units of work that capture not ```bash # Clone the repository -git clone https://github.com/matthewsinclair/stp.git -cd stp +git clone https://github.com/matthewsinclair/intent.git +cd intent -# Add STP to your PATH -export PATH="$PATH:$(pwd)/stp/bin" +# Add Intent to your PATH +export PATH="$PATH:$(pwd)/bin" # Verify installation -stp --version +intent --version # See available commands -stp help +intent help + +# Install Claude Code agent (if using Claude) +intent agents install intent ``` -### Your First Steel 
Thread +### 🏆 5-Minute Win: Your First Steel Thread ```bash -# Create a new steel thread -$ stp st new "Add user authentication" +# 1. Create a steel thread with clear intention +$ intent st new "Add user authentication" Created: ST0001 -# Create associated tasks -$ stp task create ST0001 "Research auth libraries" -$ stp task create ST0001 "Implement login endpoint" -$ stp task create ST0001 "Add session management" +# 2. Document WHY you need auth (this is the magic!) +$ intent st edit ST0001 +# Add: "Need auth because customer data must be protected" +# Add: "Using JWT because we have multiple microservices" +# Add: "Must support SSO for enterprise clients" + +# 3. Share with your AI assistant +$ intent st show ST0001 | pbcopy +# Now paste into Claude, ChatGPT, etc. +# The AI immediately understands your constraints! +``` + +### 🏆 15-Minute Win: Add Task Management + +```bash +# Install Backlog.md +npm install -g backlog.md + +# Initialize in your project +intent bl init + +# Break down your steel thread into tasks +intent bl create ST0001 "Research JWT libraries for Node.js" +intent bl create ST0001 "Design token refresh strategy" +intent bl create ST0001 "Implement login endpoint" -# Check status -$ stp status show ST0001 +# See your progress +intent bl board ``` -### Integrate with Backlog.md +### 🏆 30-Minute Win: Complete First Feature ```bash -# Install Backlog.md (npm required) -npm install -g @backlog/cli +# Work through your tasks with AI assistance +$ intent bl list ST0001 +# Copy relevant task to discuss with AI + +# As you complete work: +$ intent bl move [task-id] doing +$ intent bl move [task-id] done -# Initialize Backlog in your project -stp bl init +# Update steel thread with learnings +$ intent st edit ST0001 +# Add: "Learned: JWT refresh tokens need rotation for security" +# Add: "Decision: 15-min access token, 7-day refresh token" -# Create and manage tasks -stp bl create ST0001 "Configure OAuth provider" -stp bl list -stp bl board +# Your 
future self (and team) will thank you! ``` ## 📚 Documentation ### Getting Started -- **[User Guide](./stp/usr/user_guide.md)** - Step-by-step guide to using STP -- **[Reference Guide](./stp/usr/reference_guide.md)** - Complete command reference and detailed documentation -- **[Installation Guide](./stp/usr/user_guide.md#installation)** - Detailed installation instructions - -### Understanding STP -- **[Technical Product Design](./stp/eng/tpd/technical_product_design.md)** - The complete vision and architecture of STP -- **[Blog Series](./stp/doc/blog/)** - In-depth exploration of STP concepts: - - [Motivation for STP](./stp/doc/blog/0000-motivation-for-stp.md) - Why intention matters in software - - [Introduction to STP](./stp/doc/blog/0001-introduction-to-stp.md) - What STP is and how it works - - [The Steel Thread Methodology](./stp/doc/blog/0002-the-steel-thread-methodology.md) - Deep dive into steel threads - - [Intent Capture in Software Development](./stp/doc/blog/0003-intent-capture-in-software-development.md) - Practical techniques - - [LLM Collaboration with STP](./stp/doc/blog/0004-llm-collaboration-with-stp.md) - Enhancing AI assistance - - [Getting Started with STP](./stp/doc/blog/0005-getting-started-with-stp.md) - Practical implementation guide - - [Next Steps and Future Work](./stp/doc/blog/0006-next-steps-and-future-work.md) - Roadmap and vision + +- **[User Guide](./intent/usr/user_guide.md)** - Step-by-step guide to using Intent +- **[Reference Guide](./intent/usr/reference_guide.md)** - Complete command reference and detailed documentation +- **[Installation Guide](./intent/usr/user_guide.md#installation)** - Detailed installation instructions + +### Understanding Intent + +- **[Technical Product Design](./intent/eng/tpd/technical_product_design.md)** - The complete vision and architecture of Intent +- **[Blog Series](./docs/blog/)** - In-depth exploration of Intent concepts: + - [Motivation for Intent](./docs/blog/0000-motivation-for-intent.md) 
- Why intention matters in software + - [Introduction to Intent](./docs/blog/0001-introduction-to-intent.md) - What Intent is and how it works + - [The Steel Thread Methodology](./docs/blog/0002-the-steel-thread-methodology.md) - Deep dive into steel threads + - [Intent Capture in Software Development](./docs/blog/0003-intent-capture-in-software-development.md) - Practical techniques + - [LLM Collaboration with Intent](./docs/blog/0004-llm-collaboration-with-intent.md) - Enhancing AI assistance + - [Getting Started with Intent](./docs/blog/0005-getting-started-with-intent.md) - Practical implementation guide + - [Next Steps and Future Work](./docs/blog/0006-next-steps-and-future-work.md) - Roadmap and vision ### Project Management -- **[Work in Progress (WIP)](./stp/prj/wip.md)** - Current tasks and daily focus -- **[Steel Threads Index](./stp/prj/st/steel_threads.md)** - All steel threads and their status + +- **[Work in Progress (WIP)](./intent/wip.md)** - Current tasks and daily focus +- **[Steel Threads Index](./intent/st/steel_threads.md)** - All steel threads and their status - **[Backlog Integration](./CLAUDE.md#task-management-with-backlogmd)** - Task tracking and project history ### Development + - **[CLAUDE.md](./CLAUDE.md)** - AI assistant instructions and project conventions -- **[Architecture Overview](./stp/eng/tpd/3_architecture.md)** - System design and components -- **[Detailed Design](./stp/eng/tpd/4_detailed_design.md)** - Implementation details -- **[Testing Guide](./stp/tests/)** - Test suites and integration tests +- **[Architecture Overview](./intent/eng/tpd/3_architecture.md)** - System design and components +- **[Detailed Design](./intent/eng/tpd/4_detailed_design.md)** - Implementation details +- **[Testing Guide](./tests/)** - Test suites and integration tests ## 🛠️ Core Commands ### Steel Thread Management + ```bash -stp st new <title> # Create a new steel thread -stp st list # List all steel threads -stp st show <ST####> # Show details 
of a specific thread -stp st edit <ST####> # Edit a steel thread -stp st sync # Synchronise the steel thread index +intent st new <title> # Create a new steel thread +intent st list # List all steel threads +intent st show <ST####> # Show details of a specific thread +intent st edit <ST####> # Edit a steel thread +intent st sync # Synchronise the steel thread index ``` ### Task Management + ```bash -stp task create <ST####> <title> # Create a task linked to a thread -stp task list <ST####> # List tasks for a thread -stp status show <ST####> # Show thread and task status -stp status sync <ST####> # Sync thread status with tasks +intent task create <ST####> <title> # Create a task linked to a thread +intent task list <ST####> # List tasks for a thread +intent status show <ST####> # Show thread and task status +intent status sync <ST####> # Sync thread status with tasks ``` ### Backlog Integration + +```bash +intent bl init # Initialize Backlog.md +intent bl create <ST####> <title> # Create a Backlog task +intent bl list # List tasks (without git errors) +intent bl board # View Kanban board +``` + +### Agent Management + ```bash -stp bl init # Initialize Backlog.md -stp bl create <ST####> <title> # Create a Backlog task -stp bl list # List tasks (without git errors) -stp bl board # View Kanban board +intent agents list # Show available and installed agents +intent agents install <name> # Install an agent to Claude Code +intent agents install --all # Install all available agents +intent agents status # Check agent health and integrity +intent agents sync # Update agents with latest versions +intent agents show <name> # Display detailed agent information ``` ### LLM Integration + ```bash -stp llm usage_rules # Display STP usage patterns for LLMs -stp llm usage_rules --symlink # Create usage-rules.md symlink +intent llm usage_rules # Display Intent usage patterns for LLMs +intent llm usage_rules --symlink # Create usage-rules.md symlink ``` ## 🏗️ Project Structure ``` 
-stp/ -├── bin/ # STP command-line tools -├── doc/ # Documentation and blog posts -├── eng/ # Engineering documentation -│ └── tpd/ # Technical Product Design -├── prj/ # Project management +. +├── agents/ # Claude Code sub-agents (global) +│ ├── intent/ # Intent methodology agent +│ └── elixir/ # Elixir code doctor agent +├── bin/ # Intent command-line tools +├── docs/ # Documentation and blog posts +├── intent/ # Project artifacts (when using Intent) +│ ├── agents/ # Project-specific sub-agents │ ├── st/ # Steel threads +│ ├── eng/ # Engineering documentation +│ │ └── tpd/ # Technical Product Design +│ ├── usr/ # User documentation │ └── wip.md # Current work +├── lib/ # Templates and libraries ├── tests/ # Test suites -└── usr/ # User documentation - -backlog/ # Backlog.md tasks (if integrated) -├── tasks/ # Active tasks -├── drafts/ # Draft tasks -└── config.yml # Backlog configuration +└── backlog/ # Backlog.md tasks (if integrated) + ├── tasks/ # Active tasks + ├── drafts/ # Draft tasks + └── config.yml # Backlog configuration ``` ## 🤝 Contributing @@ -173,41 +361,78 @@ We welcome contributions! The best way to contribute is to: 3. Break down work into Backlog tasks 4. Submit a PR referencing your steel thread -See our [contribution workflow](./stp/doc/blog/0006-next-steps-and-future-work.md#contributing-to-stp) for details. +See our [contribution workflow](./docs/blog/0006-next-steps-and-future-work.md#contributing-to-intent) for details. 
+ +## 🎗️ Real-World Examples + +### Building a REST API with Intent + +```bash +# Capture the real requirements +$ intent st new "Build REST API for mobile app" +# Document: "Must support offline-first sync" +# Document: "10K daily active users expected" +# Document: "Must work on 3G connections" + +# Result: Your API design includes sync strategies, caching, and compression +``` -## 🎯 Use Cases +### Refactoring Legacy Code -STP is particularly valuable for: +```bash +$ intent st new "Refactor payment processing" +# Document: "Current system fails under Black Friday load" +# Document: "PCI compliance required by Q2" +# Document: "Cannot break existing integrations" -- **🚀 Startups**: Preserve founder vision through rapid pivots -- **🏢 Enterprise**: Maintain knowledge through team changes -- **🤖 AI Development**: Provide rich context for LLM assistance -- **📚 Open Source**: Help contributors understand project decisions -- **🎓 Education**: Teach software design thinking +# AI understands constraints and suggests appropriate patterns +``` -## 🔮 Future Vision +### Starting a New Project -STP is evolving to become the standard for intention-aware development: +```bash +$ intent st new "Project inception: E-commerce platform" +# Document: "Target: Small businesses with <100 products" +# Document: "Must integrate with Shopify/WooCommerce" +# Document: "Budget: 3 developers, 6 months" -- **Q1 2025**: Enhanced configuration and reporting -- **Q2 2025**: Multi-user collaboration features -- **Q3 2025**: Native AI integrations (MCP support) -- **Q4 2025**: Enterprise features and scalability +# Every future decision references these constraints +``` -See our [roadmap](./stp/doc/blog/0006-next-steps-and-future-work.md#roadmap-the-next-12-months) for details. +## ❓ FAQ + +### How is this different from code comments? + +**Comments** explain what code does. **Intent** captures why the code exists, what problems it solves, and what constraints shaped it. 
This context is what AI assistants need to give good suggestions. + +### Do I need to use all features? + +No! Start with just steel threads to capture intentions. Add Backlog.md when you need task tracking. Intent grows with your needs. + +### How does this help with AI coding? + +AI assistants are great at writing code but terrible at understanding your specific context. Intent provides that context in a structured way that AIs can understand and use. + +### Can I use Intent without Backlog.md? + +Absolutely! Steel threads work independently. Backlog.md just adds visual task management when you need it. + +### Is this just more documentation to maintain? + +Unlike traditional docs that go stale, Intent documentation drives your development. When you update a steel thread, you're planning work, not writing about completed work. ## 📖 Philosophy > "Great software isn't just about what it does – it's about why it exists." -STP transforms software development from a purely technical exercise into a practice that values and preserves human intention. By capturing the "why" alongside the "what", we create software that is not just functional, but truly understood. +Intent transforms software development from a purely technical exercise into a practice that values and preserves human intention. By capturing the "why" alongside the "what", we create software that is not just functional, but truly understood. 
## 🚦 Getting Help -- **Quick Start**: Run `stp help` for command overview -- **User Guide**: See [comprehensive guide](./stp/usr/user_guide.md) -- **Examples**: Check the [blog series](./stp/doc/blog/) for real-world usage -- **Issues**: Report bugs on [GitHub Issues](https://github.com/matthewsinclair/stp/issues) +- **Quick Start**: Run `intent help` for command overview +- **User Guide**: See [comprehensive guide](./intent/usr/user_guide.md) +- **Examples**: Check the [blog series](./docs/blog/) for real-world usage +- **Issues**: Report bugs on [GitHub Issues](https://github.com/matthewsinclair/intent/issues) ## 📄 License @@ -218,6 +443,6 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE.md) f **Start capturing intention today. Your future self (and team) will thank you.** ```bash -# Begin your STP journey -$ stp st new "My first steel thread" -``` \ No newline at end of file +# Begin your Intent journey +$ intent st new "My first steel thread" +``` diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..e703481 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +2.3.2 \ No newline at end of file diff --git a/backlog/config.yml b/backlog/config.yml index f65dd30..ef51b9c 100644 --- a/backlog/config.yml +++ b/backlog/config.yml @@ -1,12 +1,12 @@ -project_name: "stp" +project_name: "intent" default_status: "todo" statuses: ["todo", "wip", "done", "cancelled", "archived"] labels: [] milestones: [] date_format: yyyy-mm-dd max_column_width: 20 -backlog_directory: "backlog" auto_open_browser: true default_port: 6420 remote_operations: false auto_commit: false +zero_padded_ids: 3 diff --git a/backlog/tasks/task-1 - ST0014-Create-the-required-directory-structure.md b/backlog/tasks/task-001 - ST0014-Create-the-required-directory-structure.md similarity index 93% rename from backlog/tasks/task-1 - ST0014-Create-the-required-directory-structure.md rename to backlog/tasks/task-001 - ST0014-Create-the-required-directory-structure.md index 
ee0865a..dfb3b9b 100644 --- a/backlog/tasks/task-1 - ST0014-Create-the-required-directory-structure.md +++ b/backlog/tasks/task-001 - ST0014-Create-the-required-directory-structure.md @@ -1,5 +1,5 @@ --- -id: task-1 +id: task-001 title: ST0014 - Create the required directory structure status: archived assignee: [] diff --git a/backlog/tasks/task-2 - ST0014-Update-steel-threads-index-with-new-directory-info.md b/backlog/tasks/task-002 - ST0014-Update-steel-threads-index-with-new-directory-info.md similarity index 93% rename from backlog/tasks/task-2 - ST0014-Update-steel-threads-index-with-new-directory-info.md rename to backlog/tasks/task-002 - ST0014-Update-steel-threads-index-with-new-directory-info.md index baa27f9..16ea551 100644 --- a/backlog/tasks/task-2 - ST0014-Update-steel-threads-index-with-new-directory-info.md +++ b/backlog/tasks/task-002 - ST0014-Update-steel-threads-index-with-new-directory-info.md @@ -1,5 +1,5 @@ --- -id: task-2 +id: task-002 title: ST0014 - Update steel threads index with new directory info status: archived assignee: [] diff --git a/backlog/tasks/task-3 - ST0014-Implement-organize_st.sh-script.md b/backlog/tasks/task-003 - ST0014-Implement-organize_st.sh-script.md similarity index 93% rename from backlog/tasks/task-3 - ST0014-Implement-organize_st.sh-script.md rename to backlog/tasks/task-003 - ST0014-Implement-organize_st.sh-script.md index 679aa1c..bbb2f91 100644 --- a/backlog/tasks/task-3 - ST0014-Implement-organize_st.sh-script.md +++ b/backlog/tasks/task-003 - ST0014-Implement-organize_st.sh-script.md @@ -1,5 +1,5 @@ --- -id: task-3 +id: task-003 title: ST0014 - Implement organize_st.sh script status: archived assignee: [] diff --git a/backlog/tasks/task-4 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md b/backlog/tasks/task-004 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md similarity index 93% rename from backlog/tasks/task-4 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md rename to 
backlog/tasks/task-004 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md index 273b25a..1b1b427 100644 --- a/backlog/tasks/task-4 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md +++ b/backlog/tasks/task-004 - ST0014-Update-upgrade-script-to-run-organize_st.sh.md @@ -1,5 +1,5 @@ --- -id: task-4 +id: task-004 title: ST0014 - Update upgrade script to run organize_st.sh status: archived assignee: [] diff --git a/backlog/tasks/task-5 - ST0014-Add-tests-for-the-new-directory-structure.md b/backlog/tasks/task-005 - ST0014-Add-tests-for-the-new-directory-structure.md similarity index 93% rename from backlog/tasks/task-5 - ST0014-Add-tests-for-the-new-directory-structure.md rename to backlog/tasks/task-005 - ST0014-Add-tests-for-the-new-directory-structure.md index 15dc5dc..4895cb2 100644 --- a/backlog/tasks/task-5 - ST0014-Add-tests-for-the-new-directory-structure.md +++ b/backlog/tasks/task-005 - ST0014-Add-tests-for-the-new-directory-structure.md @@ -1,5 +1,5 @@ --- -id: task-5 +id: task-005 title: ST0014 - Add tests for the new directory structure status: done assignee: [] diff --git a/backlog/tasks/task-6 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md b/backlog/tasks/task-006 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md similarity index 93% rename from backlog/tasks/task-6 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md rename to backlog/tasks/task-006 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md index ec4c7d1..d8afb26 100644 --- a/backlog/tasks/task-6 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md +++ b/backlog/tasks/task-006 - ST0013-Research-existing-docs-for-blog-0000-Motivation-for-STP.md @@ -1,5 +1,5 @@ --- -id: task-6 +id: task-006 title: ST0013 - Research existing docs for blog 0000 - Motivation for STP status: done assignee: [] diff --git a/backlog/tasks/task-7 - ST0013-Write-introduction-section-for-blog-0000.md b/backlog/tasks/task-007 - 
ST0013-Write-introduction-section-for-blog-0000.md similarity index 92% rename from backlog/tasks/task-7 - ST0013-Write-introduction-section-for-blog-0000.md rename to backlog/tasks/task-007 - ST0013-Write-introduction-section-for-blog-0000.md index 72b455d..5c57664 100644 --- a/backlog/tasks/task-7 - ST0013-Write-introduction-section-for-blog-0000.md +++ b/backlog/tasks/task-007 - ST0013-Write-introduction-section-for-blog-0000.md @@ -1,5 +1,5 @@ --- -id: task-7 +id: task-007 title: ST0013 - Write introduction section for blog 0000 status: done assignee: [] diff --git a/backlog/tasks/task-8 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md b/backlog/tasks/task-008 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md similarity index 94% rename from backlog/tasks/task-8 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md rename to backlog/tasks/task-008 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md index 69bbaac..eaa3160 100644 --- a/backlog/tasks/task-8 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md +++ b/backlog/tasks/task-008 - ST0013-Write-'Current-challenges-in-development-documentation'-section-for-blog-0000.md @@ -1,5 +1,5 @@ --- -id: task-8 +id: task-008 title: >- ST0013 - Write 'Current challenges in development documentation' section for blog 0000 diff --git a/backlog/tasks/task-9 - ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md b/backlog/tasks/task-009 - ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md similarity index 94% rename from backlog/tasks/task-9 - ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md rename to backlog/tasks/task-009 - ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md index 1651035..6eeb8da 100644 --- a/backlog/tasks/task-9 - 
ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md +++ b/backlog/tasks/task-009 - ST0013-Write-'Why-existing-approaches-fall-short'-section-for-blog-0000.md @@ -1,5 +1,5 @@ --- -id: task-9 +id: task-009 title: ST0013 - Write 'Why existing approaches fall short' section for blog 0000 status: done assignee: [] diff --git a/backlog/tasks/task-10 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md b/backlog/tasks/task-010 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md similarity index 93% rename from backlog/tasks/task-10 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md rename to backlog/tasks/task-010 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md index a68f8ff..33b1ef9 100644 --- a/backlog/tasks/task-10 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md +++ b/backlog/tasks/task-010 - ST0013-Write-'The-vision-behind-STP'-section-for-blog-0000.md @@ -1,5 +1,5 @@ --- -id: task-10 +id: task-010 title: ST0013 - Write 'The vision behind STP' section for blog 0000 status: done assignee: [] diff --git a/backlog/tasks/task-11 - ST0013-Review-and-polish-blog-0000.md b/backlog/tasks/task-011 - ST0013-Review-and-polish-blog-0000.md similarity index 91% rename from backlog/tasks/task-11 - ST0013-Review-and-polish-blog-0000.md rename to backlog/tasks/task-011 - ST0013-Review-and-polish-blog-0000.md index 6244eef..2c71c90 100644 --- a/backlog/tasks/task-11 - ST0013-Review-and-polish-blog-0000.md +++ b/backlog/tasks/task-011 - ST0013-Review-and-polish-blog-0000.md @@ -1,5 +1,5 @@ --- -id: task-11 +id: task-011 title: ST0013 - Review and polish blog 0000 status: done assignee: [] diff --git a/backlog/tasks/task-12 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md b/backlog/tasks/task-012 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md similarity index 92% rename from backlog/tasks/task-12 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md rename to 
backlog/tasks/task-012 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md index e4196ab..680b863 100644 --- a/backlog/tasks/task-12 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md +++ b/backlog/tasks/task-012 - ST0013-Update-metadata-and-mark-blog-0000-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-12 +id: task-012 title: ST0013 - Update metadata and mark blog 0000 as complete status: done assignee: [] diff --git a/backlog/tasks/task-13 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md b/backlog/tasks/task-013 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md similarity index 93% rename from backlog/tasks/task-13 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md rename to backlog/tasks/task-013 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md index 03c1eae..f523590 100644 --- a/backlog/tasks/task-13 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md +++ b/backlog/tasks/task-013 - ST0013-Research-existing-intro-content-for-blog-0001-Introduction-to-STP.md @@ -1,5 +1,5 @@ --- -id: task-13 +id: task-013 title: ST0013 - Research existing intro content for blog 0001 - Introduction to STP status: done assignee: [] diff --git a/backlog/tasks/task-14 - ST0013-Write-opening-hook-for-blog-0001.md b/backlog/tasks/task-014 - ST0013-Write-opening-hook-for-blog-0001.md similarity index 92% rename from backlog/tasks/task-14 - ST0013-Write-opening-hook-for-blog-0001.md rename to backlog/tasks/task-014 - ST0013-Write-opening-hook-for-blog-0001.md index 74527d2..bab258f 100644 --- a/backlog/tasks/task-14 - ST0013-Write-opening-hook-for-blog-0001.md +++ b/backlog/tasks/task-014 - ST0013-Write-opening-hook-for-blog-0001.md @@ -1,5 +1,5 @@ --- -id: task-14 +id: task-014 title: ST0013 - Write opening hook for blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-15 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md 
b/backlog/tasks/task-015 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md similarity index 92% rename from backlog/tasks/task-15 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md rename to backlog/tasks/task-015 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md index 1244ef2..38710c6 100644 --- a/backlog/tasks/task-15 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md +++ b/backlog/tasks/task-015 - ST0013-Write-'What-is-STP-'-section-for-blog-0001.md @@ -1,5 +1,5 @@ --- -id: task-15 +id: task-015 title: ST0013 - Write 'What is STP?' section for blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-16 - ST0013-Write-'Core-principles'-section-for-blog-0001.md b/backlog/tasks/task-016 - ST0013-Write-'Core-principles'-section-for-blog-0001.md similarity index 92% rename from backlog/tasks/task-16 - ST0013-Write-'Core-principles'-section-for-blog-0001.md rename to backlog/tasks/task-016 - ST0013-Write-'Core-principles'-section-for-blog-0001.md index fc15852..0ec21a8 100644 --- a/backlog/tasks/task-16 - ST0013-Write-'Core-principles'-section-for-blog-0001.md +++ b/backlog/tasks/task-016 - ST0013-Write-'Core-principles'-section-for-blog-0001.md @@ -1,5 +1,5 @@ --- -id: task-16 +id: task-016 title: ST0013 - Write 'Core principles' section for blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-17 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md b/backlog/tasks/task-017 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md similarity index 93% rename from backlog/tasks/task-17 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md rename to backlog/tasks/task-017 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md index 0fed47d..ec015ab 100644 --- a/backlog/tasks/task-17 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md +++ b/backlog/tasks/task-017 - ST0013-Create-ASCII-diagram-showing-STP-workflow-for-blog-0001.md @@ -1,5 +1,5 @@ --- -id: 
task-17 +id: task-017 title: ST0013 - Create ASCII diagram showing STP workflow for blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-18 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md b/backlog/tasks/task-018 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md similarity index 92% rename from backlog/tasks/task-18 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md rename to backlog/tasks/task-018 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md index 8fab2a0..c927ed9 100644 --- a/backlog/tasks/task-18 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md +++ b/backlog/tasks/task-018 - ST0013-Write-'Why-STP-matters'-section-for-blog-0001.md @@ -1,5 +1,5 @@ --- -id: task-18 +id: task-018 title: ST0013 - Write 'Why STP matters' section for blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-19 - ST0013-Review-and-polish-blog-0001.md b/backlog/tasks/task-019 - ST0013-Review-and-polish-blog-0001.md similarity index 91% rename from backlog/tasks/task-19 - ST0013-Review-and-polish-blog-0001.md rename to backlog/tasks/task-019 - ST0013-Review-and-polish-blog-0001.md index f4a3732..5a826ce 100644 --- a/backlog/tasks/task-19 - ST0013-Review-and-polish-blog-0001.md +++ b/backlog/tasks/task-019 - ST0013-Review-and-polish-blog-0001.md @@ -1,5 +1,5 @@ --- -id: task-19 +id: task-019 title: ST0013 - Review and polish blog 0001 status: done assignee: [] diff --git a/backlog/tasks/task-20 - ST0013-Update-metadata-and-mark-blog-0001-as-complete.md b/backlog/tasks/task-020 - ST0013-Update-metadata-and-mark-blog-0001-as-complete.md similarity index 92% rename from backlog/tasks/task-20 - ST0013-Update-metadata-and-mark-blog-0001-as-complete.md rename to backlog/tasks/task-020 - ST0013-Update-metadata-and-mark-blog-0001-as-complete.md index 9b0c9f0..727ff5b 100644 --- a/backlog/tasks/task-20 - ST0013-Update-metadata-and-mark-blog-0001-as-complete.md +++ b/backlog/tasks/task-020 - 
ST0013-Update-metadata-and-mark-blog-0001-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-20 +id: task-020 title: ST0013 - Update metadata and mark blog 0001 as complete status: done assignee: [] diff --git a/backlog/tasks/task-21 - ST0013-Research-steel-thread-examples-for-blog-0002.md b/backlog/tasks/task-021 - ST0013-Research-steel-thread-examples-for-blog-0002.md similarity index 92% rename from backlog/tasks/task-21 - ST0013-Research-steel-thread-examples-for-blog-0002.md rename to backlog/tasks/task-021 - ST0013-Research-steel-thread-examples-for-blog-0002.md index bc25d1e..7db3b8d 100644 --- a/backlog/tasks/task-21 - ST0013-Research-steel-thread-examples-for-blog-0002.md +++ b/backlog/tasks/task-021 - ST0013-Research-steel-thread-examples-for-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-21 +id: task-021 title: ST0013 - Research steel thread examples for blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-22 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md b/backlog/tasks/task-022 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md similarity index 93% rename from backlog/tasks/task-22 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md rename to backlog/tasks/task-022 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md index ce60b5a..f84a268 100644 --- a/backlog/tasks/task-22 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md +++ b/backlog/tasks/task-022 - ST0013-Write-'What-is-a-steel-thread-'-section-for-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-22 +id: task-022 title: ST0013 - Write 'What is a steel thread?' 
section for blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-23 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md b/backlog/tasks/task-023 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md similarity index 93% rename from backlog/tasks/task-23 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md rename to backlog/tasks/task-023 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md index 4e21f62..47e8c66 100644 --- a/backlog/tasks/task-23 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md +++ b/backlog/tasks/task-023 - ST0013-Write-'Steel-threads-vs-traditional-methods'-section-for-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-23 +id: task-023 title: ST0013 - Write 'Steel threads vs traditional methods' section for blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-24 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md b/backlog/tasks/task-024 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md similarity index 93% rename from backlog/tasks/task-24 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md rename to backlog/tasks/task-024 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md index d082cdf..78f3131 100644 --- a/backlog/tasks/task-24 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md +++ b/backlog/tasks/task-024 - ST0013-Create-ASCII-diagram-of-steel-thread-lifecycle-for-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-24 +id: task-024 title: ST0013 - Create ASCII diagram of steel thread lifecycle for blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-25 - ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md b/backlog/tasks/task-025 - ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md similarity index 93% rename from backlog/tasks/task-25 - 
ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md rename to backlog/tasks/task-025 - ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md index d530907..48f0318 100644 --- a/backlog/tasks/task-25 - ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md +++ b/backlog/tasks/task-025 - ST0013-Write-'Benefits-and-examples'-section-for-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-25 +id: task-025 title: ST0013 - Write 'Benefits and examples' section for blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-26 - ST0013-Review-and-polish-blog-0002.md b/backlog/tasks/task-026 - ST0013-Review-and-polish-blog-0002.md similarity index 91% rename from backlog/tasks/task-26 - ST0013-Review-and-polish-blog-0002.md rename to backlog/tasks/task-026 - ST0013-Review-and-polish-blog-0002.md index ee7049a..6e3291c 100644 --- a/backlog/tasks/task-26 - ST0013-Review-and-polish-blog-0002.md +++ b/backlog/tasks/task-026 - ST0013-Review-and-polish-blog-0002.md @@ -1,5 +1,5 @@ --- -id: task-26 +id: task-026 title: ST0013 - Review and polish blog 0002 status: done assignee: [] diff --git a/backlog/tasks/task-27 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md b/backlog/tasks/task-027 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md similarity index 92% rename from backlog/tasks/task-27 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md rename to backlog/tasks/task-027 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md index 56223eb..ac6aff7 100644 --- a/backlog/tasks/task-27 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md +++ b/backlog/tasks/task-027 - ST0013-Update-metadata-and-mark-blog-0002-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-27 +id: task-027 title: ST0013 - Update metadata and mark blog 0002 as complete status: done assignee: [] diff --git a/backlog/tasks/task-28 - ST0013-Research-intent-capture-challenges-for-blog-0003.md b/backlog/tasks/task-028 - 
ST0013-Research-intent-capture-challenges-for-blog-0003.md similarity index 92% rename from backlog/tasks/task-28 - ST0013-Research-intent-capture-challenges-for-blog-0003.md rename to backlog/tasks/task-028 - ST0013-Research-intent-capture-challenges-for-blog-0003.md index 71b0ab6..487dfca 100644 --- a/backlog/tasks/task-28 - ST0013-Research-intent-capture-challenges-for-blog-0003.md +++ b/backlog/tasks/task-028 - ST0013-Research-intent-capture-challenges-for-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-28 +id: task-028 title: ST0013 - Research intent capture challenges for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-29 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md b/backlog/tasks/task-029 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md similarity index 92% rename from backlog/tasks/task-29 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md rename to backlog/tasks/task-029 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md index 6fcff66..acbad20 100644 --- a/backlog/tasks/task-29 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md +++ b/backlog/tasks/task-029 - ST0013-Write-'The-intent-problem'-section-for-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-29 +id: task-029 title: ST0013 - Write 'The intent problem' section for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-30 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md b/backlog/tasks/task-030 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md similarity index 93% rename from backlog/tasks/task-30 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md rename to backlog/tasks/task-030 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md index 56e2848..6955619 100644 --- a/backlog/tasks/task-30 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md +++ b/backlog/tasks/task-030 - ST0013-Write-'How-STP-captures-intent'-section-for-blog-0003.md @@ -1,5 +1,5 
@@ --- -id: task-30 +id: task-030 title: ST0013 - Write 'How STP captures intent' section for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-31 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md b/backlog/tasks/task-031 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md similarity index 92% rename from backlog/tasks/task-31 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md rename to backlog/tasks/task-031 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md index 3a52de6..396452c 100644 --- a/backlog/tasks/task-31 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md +++ b/backlog/tasks/task-031 - ST0013-Write-'Intent-and-LLMs'-section-for-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-31 +id: task-031 title: ST0013 - Write 'Intent and LLMs' section for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-32 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md b/backlog/tasks/task-032 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md similarity index 92% rename from backlog/tasks/task-32 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md rename to backlog/tasks/task-032 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md index 95edd36..1945fdc 100644 --- a/backlog/tasks/task-32 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md +++ b/backlog/tasks/task-032 - ST0013-Create-ASCII-diagram-of-intent-flow-for-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-32 +id: task-032 title: ST0013 - Create ASCII diagram of intent flow for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-33 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md b/backlog/tasks/task-033 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md similarity index 92% rename from backlog/tasks/task-33 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md rename to backlog/tasks/task-033 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md index 
49ad8cd..82cca64 100644 --- a/backlog/tasks/task-33 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md +++ b/backlog/tasks/task-033 - ST0013-Write-'Practical-examples'-section-for-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-33 +id: task-033 title: ST0013 - Write 'Practical examples' section for blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-34 - ST0013-Review-and-polish-blog-0003.md b/backlog/tasks/task-034 - ST0013-Review-and-polish-blog-0003.md similarity index 91% rename from backlog/tasks/task-34 - ST0013-Review-and-polish-blog-0003.md rename to backlog/tasks/task-034 - ST0013-Review-and-polish-blog-0003.md index 5fd65eb..882baa4 100644 --- a/backlog/tasks/task-34 - ST0013-Review-and-polish-blog-0003.md +++ b/backlog/tasks/task-034 - ST0013-Review-and-polish-blog-0003.md @@ -1,5 +1,5 @@ --- -id: task-34 +id: task-034 title: ST0013 - Review and polish blog 0003 status: done assignee: [] diff --git a/backlog/tasks/task-35 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md b/backlog/tasks/task-035 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md similarity index 92% rename from backlog/tasks/task-35 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md rename to backlog/tasks/task-035 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md index b5e992f..2a82bb0 100644 --- a/backlog/tasks/task-35 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md +++ b/backlog/tasks/task-035 - ST0013-Update-metadata-and-mark-blog-0003-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-35 +id: task-035 title: ST0013 - Update metadata and mark blog 0003 as complete status: done assignee: [] diff --git a/backlog/tasks/task-36 - ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md b/backlog/tasks/task-036 - ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md similarity index 92% rename from backlog/tasks/task-36 - ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md rename to backlog/tasks/task-036 - 
ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md index 9b857a2..ae07581 100644 --- a/backlog/tasks/task-36 - ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md +++ b/backlog/tasks/task-036 - ST0013-Research-LLM-collaboration-patterns-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-36 +id: task-036 title: ST0013 - Research LLM collaboration patterns for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-37 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md b/backlog/tasks/task-037 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md similarity index 92% rename from backlog/tasks/task-37 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md rename to backlog/tasks/task-037 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md index 6d91d47..097932e 100644 --- a/backlog/tasks/task-37 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md +++ b/backlog/tasks/task-037 - ST0013-Write-'STP-design-for-LLMs'-section-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-37 +id: task-037 title: ST0013 - Write 'STP design for LLMs' section for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-38 - ST0013-Write-'Context-management'-section-for-blog-0004.md b/backlog/tasks/task-038 - ST0013-Write-'Context-management'-section-for-blog-0004.md similarity index 92% rename from backlog/tasks/task-38 - ST0013-Write-'Context-management'-section-for-blog-0004.md rename to backlog/tasks/task-038 - ST0013-Write-'Context-management'-section-for-blog-0004.md index e566d43..20b39b2 100644 --- a/backlog/tasks/task-38 - ST0013-Write-'Context-management'-section-for-blog-0004.md +++ b/backlog/tasks/task-038 - ST0013-Write-'Context-management'-section-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-38 +id: task-038 title: ST0013 - Write 'Context management' section for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-39 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md 
b/backlog/tasks/task-039 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md similarity index 93% rename from backlog/tasks/task-39 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md rename to backlog/tasks/task-039 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md index 917baf2..bc247e8 100644 --- a/backlog/tasks/task-39 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md +++ b/backlog/tasks/task-039 - ST0013-Write-'Templates-and-structure'-section-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-39 +id: task-039 title: ST0013 - Write 'Templates and structure' section for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-40 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md b/backlog/tasks/task-040 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md similarity index 92% rename from backlog/tasks/task-40 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md rename to backlog/tasks/task-040 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md index 9e6094a..7cd6bf4 100644 --- a/backlog/tasks/task-40 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md +++ b/backlog/tasks/task-040 - ST0013-Create-ASCII-diagram-of-LLM-workflow-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-40 +id: task-040 title: ST0013 - Create ASCII diagram of LLM workflow for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-41 - ST0013-Write-'Future-opportunities'-section-for-blog-0004.md b/backlog/tasks/task-041 - ST0013-Write-'Future-opportunities'-section-for-blog-0004.md similarity index 92% rename from backlog/tasks/task-41 - ST0013-Write-'Future-opportunities'-section-for-blog-0004.md rename to backlog/tasks/task-041 - ST0013-Write-'Future-opportunities'-section-for-blog-0004.md index f18281e..5ab69da 100644 --- a/backlog/tasks/task-41 - ST0013-Write-'Future-opportunities'-section-for-blog-0004.md +++ b/backlog/tasks/task-041 - 
ST0013-Write-'Future-opportunities'-section-for-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-41 +id: task-041 title: ST0013 - Write 'Future opportunities' section for blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-42 - ST0013-Review-and-polish-blog-0004.md b/backlog/tasks/task-042 - ST0013-Review-and-polish-blog-0004.md similarity index 91% rename from backlog/tasks/task-42 - ST0013-Review-and-polish-blog-0004.md rename to backlog/tasks/task-042 - ST0013-Review-and-polish-blog-0004.md index c29f33e..6fe0d54 100644 --- a/backlog/tasks/task-42 - ST0013-Review-and-polish-blog-0004.md +++ b/backlog/tasks/task-042 - ST0013-Review-and-polish-blog-0004.md @@ -1,5 +1,5 @@ --- -id: task-42 +id: task-042 title: ST0013 - Review and polish blog 0004 status: done assignee: [] diff --git a/backlog/tasks/task-43 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md b/backlog/tasks/task-043 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md similarity index 92% rename from backlog/tasks/task-43 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md rename to backlog/tasks/task-043 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md index 0b954f9..037bf4d 100644 --- a/backlog/tasks/task-43 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md +++ b/backlog/tasks/task-043 - ST0013-Update-metadata-and-mark-blog-0004-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-43 +id: task-043 title: ST0013 - Update metadata and mark blog 0004 as complete status: done assignee: [] diff --git a/backlog/tasks/task-44 - ST0013-Write-'Installation'-section-for-blog-0005.md b/backlog/tasks/task-044 - ST0013-Write-'Installation'-section-for-blog-0005.md similarity index 92% rename from backlog/tasks/task-44 - ST0013-Write-'Installation'-section-for-blog-0005.md rename to backlog/tasks/task-044 - ST0013-Write-'Installation'-section-for-blog-0005.md index bbac79b..4486e00 100644 --- a/backlog/tasks/task-44 - ST0013-Write-'Installation'-section-for-blog-0005.md 
+++ b/backlog/tasks/task-044 - ST0013-Write-'Installation'-section-for-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-44 +id: task-044 title: ST0013 - Write 'Installation' section for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-45 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md b/backlog/tasks/task-045 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md similarity index 92% rename from backlog/tasks/task-45 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md rename to backlog/tasks/task-045 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md index 42382a2..3be027f 100644 --- a/backlog/tasks/task-45 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md +++ b/backlog/tasks/task-045 - ST0013-Write-'Basic-commands'-section-for-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-45 +id: task-045 title: ST0013 - Write 'Basic commands' section for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-46 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md b/backlog/tasks/task-046 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md similarity index 93% rename from backlog/tasks/task-46 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md rename to backlog/tasks/task-046 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md index 55bf55d..6474294 100644 --- a/backlog/tasks/task-46 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md +++ b/backlog/tasks/task-046 - ST0013-Write-'Creating-your-first-steel-thread'-section-for-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-46 +id: task-046 title: ST0013 - Write 'Creating your first steel thread' section for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-47 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md b/backlog/tasks/task-047 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md similarity index 93% rename from 
backlog/tasks/task-47 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md rename to backlog/tasks/task-047 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md index 8d3d96a..6e5f759 100644 --- a/backlog/tasks/task-47 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md +++ b/backlog/tasks/task-047 - ST0013-Write-'STP+Backlog-workflow'-meta-section-for-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-47 +id: task-047 title: ST0013 - Write 'STP+Backlog workflow' meta-section for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-48 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md b/backlog/tasks/task-048 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md similarity index 93% rename from backlog/tasks/task-48 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md rename to backlog/tasks/task-048 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md index 9aefc45..f571789 100644 --- a/backlog/tasks/task-48 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md +++ b/backlog/tasks/task-048 - ST0013-Add-real-command-outputs-from-blog-writing-process-for-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-48 +id: task-048 title: ST0013 - Add real command outputs from blog writing process for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-49 - ST0013-Write-'Best-practices'-section-for-blog-0005.md b/backlog/tasks/task-049 - ST0013-Write-'Best-practices'-section-for-blog-0005.md similarity index 92% rename from backlog/tasks/task-49 - ST0013-Write-'Best-practices'-section-for-blog-0005.md rename to backlog/tasks/task-049 - ST0013-Write-'Best-practices'-section-for-blog-0005.md index 67859d4..1a5f07f 100644 --- a/backlog/tasks/task-49 - ST0013-Write-'Best-practices'-section-for-blog-0005.md +++ b/backlog/tasks/task-049 - ST0013-Write-'Best-practices'-section-for-blog-0005.md @@ -1,5 +1,5 
@@ --- -id: task-49 +id: task-049 title: ST0013 - Write 'Best practices' section for blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-50 - ST0013-Review-and-polish-blog-0005.md b/backlog/tasks/task-050 - ST0013-Review-and-polish-blog-0005.md similarity index 91% rename from backlog/tasks/task-50 - ST0013-Review-and-polish-blog-0005.md rename to backlog/tasks/task-050 - ST0013-Review-and-polish-blog-0005.md index ab3903f..2262a7d 100644 --- a/backlog/tasks/task-50 - ST0013-Review-and-polish-blog-0005.md +++ b/backlog/tasks/task-050 - ST0013-Review-and-polish-blog-0005.md @@ -1,5 +1,5 @@ --- -id: task-50 +id: task-050 title: ST0013 - Review and polish blog 0005 status: done assignee: [] diff --git a/backlog/tasks/task-51 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md b/backlog/tasks/task-051 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md similarity index 92% rename from backlog/tasks/task-51 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md rename to backlog/tasks/task-051 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md index 925d69e..628949d 100644 --- a/backlog/tasks/task-51 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md +++ b/backlog/tasks/task-051 - ST0013-Update-metadata-and-mark-blog-0005-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-51 +id: task-051 title: ST0013 - Update metadata and mark blog 0005 as complete status: done assignee: [] diff --git a/backlog/tasks/task-52 - ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md b/backlog/tasks/task-052 - ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md similarity index 92% rename from backlog/tasks/task-52 - ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md rename to backlog/tasks/task-052 - ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md index 4d46543..876989d 100644 --- a/backlog/tasks/task-52 - ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md +++ b/backlog/tasks/task-052 - 
ST0013-Write-'Current-state-of-STP'-section-for-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-52 +id: task-052 title: ST0013 - Write 'Current state of STP' section for blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-53 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md b/backlog/tasks/task-053 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md similarity index 92% rename from backlog/tasks/task-53 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md rename to backlog/tasks/task-053 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md index 01dd845..0a86b2f 100644 --- a/backlog/tasks/task-53 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md +++ b/backlog/tasks/task-053 - ST0013-Write-'Lessons-learned'-section-for-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-53 +id: task-053 title: ST0013 - Write 'Lessons learned' section for blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-54 - ST0013-Write-'Roadmap'-section-for-blog-0006.md b/backlog/tasks/task-054 - ST0013-Write-'Roadmap'-section-for-blog-0006.md similarity index 92% rename from backlog/tasks/task-54 - ST0013-Write-'Roadmap'-section-for-blog-0006.md rename to backlog/tasks/task-054 - ST0013-Write-'Roadmap'-section-for-blog-0006.md index bbdaa0c..f34ba70 100644 --- a/backlog/tasks/task-54 - ST0013-Write-'Roadmap'-section-for-blog-0006.md +++ b/backlog/tasks/task-054 - ST0013-Write-'Roadmap'-section-for-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-54 +id: task-054 title: ST0013 - Write 'Roadmap' section for blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-55 - ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md b/backlog/tasks/task-055 - ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md similarity index 93% rename from backlog/tasks/task-55 - ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md rename to backlog/tasks/task-055 - 
ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md index 6fe223f..5fc001e 100644 --- a/backlog/tasks/task-55 - ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md +++ b/backlog/tasks/task-055 - ST0013-Write-'Integration-opportunities'-section-for-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-55 +id: task-055 title: ST0013 - Write 'Integration opportunities' section for blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-56 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md b/backlog/tasks/task-056 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md similarity index 93% rename from backlog/tasks/task-56 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md rename to backlog/tasks/task-056 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md index cade2b9..7beb19a 100644 --- a/backlog/tasks/task-56 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md +++ b/backlog/tasks/task-056 - ST0013-Write-'Community-and-contributions'-section-for-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-56 +id: task-056 title: ST0013 - Write 'Community and contributions' section for blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-57 - ST0013-Review-and-polish-blog-0006.md b/backlog/tasks/task-057 - ST0013-Review-and-polish-blog-0006.md similarity index 91% rename from backlog/tasks/task-57 - ST0013-Review-and-polish-blog-0006.md rename to backlog/tasks/task-057 - ST0013-Review-and-polish-blog-0006.md index 4d0c8dd..1ac0875 100644 --- a/backlog/tasks/task-57 - ST0013-Review-and-polish-blog-0006.md +++ b/backlog/tasks/task-057 - ST0013-Review-and-polish-blog-0006.md @@ -1,5 +1,5 @@ --- -id: task-57 +id: task-057 title: ST0013 - Review and polish blog 0006 status: done assignee: [] diff --git a/backlog/tasks/task-58 - ST0013-Update-metadata-and-mark-blog-0006-as-complete.md b/backlog/tasks/task-058 - 
ST0013-Update-metadata-and-mark-blog-0006-as-complete.md similarity index 92% rename from backlog/tasks/task-58 - ST0013-Update-metadata-and-mark-blog-0006-as-complete.md rename to backlog/tasks/task-058 - ST0013-Update-metadata-and-mark-blog-0006-as-complete.md index cd7ed2f..cb7562f 100644 --- a/backlog/tasks/task-58 - ST0013-Update-metadata-and-mark-blog-0006-as-complete.md +++ b/backlog/tasks/task-058 - ST0013-Update-metadata-and-mark-blog-0006-as-complete.md @@ -1,5 +1,5 @@ --- -id: task-58 +id: task-058 title: ST0013 - Update metadata and mark blog 0006 as complete status: done assignee: [] diff --git a/backlog/tasks/task-059 - ST0016-Create-examples-directory-structure.md b/backlog/tasks/task-059 - ST0016-Create-examples-directory-structure.md new file mode 100644 index 0000000..c0e7295 --- /dev/null +++ b/backlog/tasks/task-059 - ST0016-Create-examples-directory-structure.md @@ -0,0 +1,11 @@ +--- +id: task-059 +title: ST0016 - Create examples directory structure +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-060 - ST0016-Create-v0.0.0-example-project.md b/backlog/tasks/task-060 - ST0016-Create-v0.0.0-example-project.md new file mode 100644 index 0000000..f03b9a8 --- /dev/null +++ b/backlog/tasks/task-060 - ST0016-Create-v0.0.0-example-project.md @@ -0,0 +1,11 @@ +--- +id: task-060 +title: ST0016 - Create v0.0.0 example project +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-061 - ST0016-Create-v1.2.0-example-project.md b/backlog/tasks/task-061 - ST0016-Create-v1.2.0-example-project.md new file mode 100644 index 0000000..5be4caf --- /dev/null +++ b/backlog/tasks/task-061 - ST0016-Create-v1.2.0-example-project.md @@ -0,0 +1,11 @@ +--- +id: task-061 +title: ST0016 - Create v1.2.0 example project +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: 
[] +--- + +## Description diff --git a/backlog/tasks/task-062 - ST0016-Create-v1.2.1-example-project.md b/backlog/tasks/task-062 - ST0016-Create-v1.2.1-example-project.md new file mode 100644 index 0000000..044c92e --- /dev/null +++ b/backlog/tasks/task-062 - ST0016-Create-v1.2.1-example-project.md @@ -0,0 +1,11 @@ +--- +id: task-062 +title: ST0016 - Create v1.2.1 example project +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-063 - ST0016-Create-hello-world-v2.0.0-project.md b/backlog/tasks/task-063 - ST0016-Create-hello-world-v2.0.0-project.md new file mode 100644 index 0000000..2b9dbef --- /dev/null +++ b/backlog/tasks/task-063 - ST0016-Create-hello-world-v2.0.0-project.md @@ -0,0 +1,11 @@ +--- +id: task-063 +title: ST0016 - Create hello-world v2.0.0 project +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-064 - ST0016-Write-comprehensive-BATS-tests.md b/backlog/tasks/task-064 - ST0016-Write-comprehensive-BATS-tests.md new file mode 100644 index 0000000..0e8538b --- /dev/null +++ b/backlog/tasks/task-064 - ST0016-Write-comprehensive-BATS-tests.md @@ -0,0 +1,11 @@ +--- +id: task-064 +title: ST0016 - Write comprehensive BATS tests +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-065 - ST0016-Implement-intent_bootstrap-in-top-level-bin.md b/backlog/tasks/task-065 - ST0016-Implement-intent_bootstrap-in-top-level-bin.md new file mode 100644 index 0000000..e4e2b5a --- /dev/null +++ b/backlog/tasks/task-065 - ST0016-Implement-intent_bootstrap-in-top-level-bin.md @@ -0,0 +1,12 @@ +--- +id: task-065 +title: ST0016 - Implement intent_bootstrap in top-level bin +status: done +assignee: [] +created_date: '2025-07-16' +updated_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff 
--git a/backlog/tasks/task-066 - ST0016-Implement-intent_doctor-in-top-level-bin.md b/backlog/tasks/task-066 - ST0016-Implement-intent_doctor-in-top-level-bin.md new file mode 100644 index 0000000..d3a2d72 --- /dev/null +++ b/backlog/tasks/task-066 - ST0016-Implement-intent_doctor-in-top-level-bin.md @@ -0,0 +1,12 @@ +--- +id: task-066 +title: ST0016 - Implement intent_doctor in top-level bin +status: done +assignee: [] +created_date: '2025-07-16' +updated_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-067 - ST0016-Create-JSON-config-parser-in-top-level-bin.md b/backlog/tasks/task-067 - ST0016-Create-JSON-config-parser-in-top-level-bin.md new file mode 100644 index 0000000..23818dd --- /dev/null +++ b/backlog/tasks/task-067 - ST0016-Create-JSON-config-parser-in-top-level-bin.md @@ -0,0 +1,12 @@ +--- +id: task-067 +title: ST0016 - Create JSON config parser in top-level bin +status: done +assignee: [] +created_date: '2025-07-16' +updated_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-068 - ST0016-Implement-intent_upgrade-for-migrations.md b/backlog/tasks/task-068 - ST0016-Implement-intent_upgrade-for-migrations.md new file mode 100644 index 0000000..84570be --- /dev/null +++ b/backlog/tasks/task-068 - ST0016-Implement-intent_upgrade-for-migrations.md @@ -0,0 +1,12 @@ +--- +id: task-068 +title: ST0016 - Implement intent_upgrade for migrations +status: done +assignee: [] +created_date: '2025-07-16' +updated_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/backlog/tasks/task-069 - ST0016-Test-migrations-on-example-projects.md b/backlog/tasks/task-069 - ST0016-Test-migrations-on-example-projects.md new file mode 100644 index 0000000..3397ce8 --- /dev/null +++ b/backlog/tasks/task-069 - ST0016-Test-migrations-on-example-projects.md @@ -0,0 +1,31 @@ +--- +id: task-069 +title: ST0016 - Test migrations on example projects 
+status: done +assignee: [] +created_date: '2025-07-16' +completed_date: '2025-07-17' +labels: [] +dependencies: [] +--- + +## Description + +Test the `intent upgrade` command on all example projects (v0.0.0, v1.2.0, v1.2.1) to ensure migrations work correctly. + +## Results + +✅ All example projects successfully migrated to Intent v2.0.0: +- v0.0.0-project: Migrated (manual fix required for missing YAML frontmatter) +- v1.2.0-project: Migrated (manual fix required for frontmatter conversion issue) +- v1.2.1-project: Migrated successfully with automatic upgrade + +All projects now: +- Use Intent v2.0.0 directory structure (intent/ instead of stp/) +- Have .intent/config.json configuration +- Pass `intent doctor` checks +- Show steel threads correctly with `intent st list` + +## Notes + +The upgrade script has an issue with the convert_yaml_frontmatter function that needs fixing for v0.0.0 and v1.2.0 projects. The function doesn't properly handle files without YAML frontmatter. diff --git a/backlog/tasks/task-070 - ST0016-Execute-self-migration-to-new-structure.md b/backlog/tasks/task-070 - ST0016-Execute-self-migration-to-new-structure.md new file mode 100644 index 0000000..74ecd2e --- /dev/null +++ b/backlog/tasks/task-070 - ST0016-Execute-self-migration-to-new-structure.md @@ -0,0 +1,11 @@ +--- +id: task-070 +title: ST0016 - Execute self-migration to new structure +status: todo +assignee: [] +created_date: '2025-07-16' +labels: [] +dependencies: [] +--- + +## Description diff --git a/bin/intent b/bin/intent new file mode 100755 index 0000000..4a6de33 --- /dev/null +++ b/bin/intent @@ -0,0 +1,172 @@ +#!/bin/bash +# intent - Main command for Intent +# Copyright (c) 2024 Matthew Sinclair +# Licensed under the MIT License (see LICENSE file) +# Usage: intent <command> [options] [arguments] + +# Exit on error +set -e + +# Determine INTENT_HOME early for version loading +if [ -z "$INTENT_HOME" ]; then + INTENT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +fi + +# Source helpers for version function +if [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" +fi + +# Get version from centralized source +INTENT_VERSION="$(get_intent_version 2>/dev/null || echo "2.2.1")" + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Export INTENT_HOME +export INTENT_HOME + +# Check if bin directory exists +if [ ! -d "$INTENT_HOME/bin" ]; then + error "Invalid INTENT_HOME: bin directory not found at $INTENT_HOME/bin" +fi + +# Set bin directory +BIN_DIR="$INTENT_HOME/bin" + +# Define global commands that don't require project context +GLOBAL_COMMANDS="help doctor bootstrap init version info fileindex upgrade" + +# Function to check if command is global +is_global_command() { + local cmd="$1" + for gc in $GLOBAL_COMMANDS; do + [ "$cmd" = "$gc" ] && return 0 + done + return 1 +} + +# Display info if no arguments provided +if [ $# -eq 0 ]; then + # Info is a global command, execute directly + exec "$BIN_DIR/intent_info" +fi + +# Get the command +COMMAND="$1" +shift + +# Handle version flag +if [ "$COMMAND" = "--version" ] || [ "$COMMAND" = "-v" ] || [ "$COMMAND" = "version" ]; then + echo "Intent version $INTENT_VERSION" + exit 0 +fi + +# Handle help command specially +if [ "$COMMAND" = "help" ] || [ "$COMMAND" = "--help" ] || [ "$COMMAND" = "-h" ]; then + exec "$BIN_DIR/intent_help" "$@" +fi + +# Handle plugin commands +PLUGIN_COMMAND="" +PLUGIN_SUBCOMMAND="" + +# Check for plugin commands (claude, agents) +case "$COMMAND" in + claude) + # Claude plugin - check for subcommand + if [ $# -ge 1 ] && [ "$1" = "subagents" ]; then + PLUGIN_COMMAND="claude" + PLUGIN_SUBCOMMAND="subagents" + shift # Remove 'subagents' from arguments + COMMAND_SCRIPT="$INTENT_HOME/intent/plugins/claude/bin/intent_claude_subagents" + else + error "Unknown claude subcommand. 
Try: intent claude subagents" + fi + ;; + agents) + # AGENTS.md plugin + PLUGIN_COMMAND="agents" + COMMAND_SCRIPT="$INTENT_HOME/intent/plugins/agents/bin/intent_agents" + ;; + st) + COMMAND="st" + COMMAND_SCRIPT="intent_st" + ;; + bl) + COMMAND="bl" + COMMAND_SCRIPT="intent_bl" + ;; + *) + # Default: prefix with intent_ + COMMAND_SCRIPT="intent_$COMMAND" + ;; +esac + +# Check if command script exists +if [ -n "$PLUGIN_COMMAND" ]; then + # For plugin commands, check plugin path + if [ ! -f "$COMMAND_SCRIPT" ]; then + error "Plugin command not found: $COMMAND_SCRIPT" + fi +else + # For regular commands, check bin directory + if [ ! -f "$BIN_DIR/$COMMAND_SCRIPT" ]; then + error "Unknown command '$COMMAND'. Run 'intent help' for usage information." + fi +fi + +# Check if script is executable +if [ -n "$PLUGIN_COMMAND" ]; then + # For plugin commands + if [ ! -x "$COMMAND_SCRIPT" ]; then + echo "Warning: Making plugin script executable: $COMMAND_SCRIPT" >&2 + chmod +x "$COMMAND_SCRIPT" + fi +else + # For regular commands + if [ ! -x "$BIN_DIR/$COMMAND_SCRIPT" ]; then + echo "Warning: Making script executable: $COMMAND_SCRIPT" >&2 + chmod +x "$BIN_DIR/$COMMAND_SCRIPT" + fi +fi + +# Check if this is a global command or plugin command +if [ -n "$PLUGIN_COMMAND" ]; then + # Plugin commands may have their own context requirements + # Let the plugin handle it + exec "$COMMAND_SCRIPT" "$@" +elif is_global_command "$COMMAND"; then + # Execute without requiring project context + exec "$BIN_DIR/$COMMAND_SCRIPT" "$@" +fi + +# For project commands, source config and check for project root +if [ -f "$BIN_DIR/intent_config" ]; then + source "$BIN_DIR/intent_config" + # Load configuration + load_intent_config + + # Check if we found a project root (check for empty or unset) + if [ -z "${PROJECT_ROOT:-}" ] || [ "$PROJECT_ROOT" = "" ]; then + echo "Error: Not in an Intent project directory." >&2 + echo "" >&2 + echo "The '$COMMAND' command requires an Intent project." 
>&2 + echo "" >&2 + echo "To create a new project: intent init" >&2 + echo "To see all commands: intent help" >&2 + exit 1 + fi +else + error "Missing intent_config library. Intent installation may be corrupted." +fi + +# Execute command with remaining arguments +if [ -n "$PLUGIN_COMMAND" ]; then + exec "$COMMAND_SCRIPT" "$@" +else + exec "$BIN_DIR/$COMMAND_SCRIPT" "$@" +fi \ No newline at end of file diff --git a/bin/intent_backlog b/bin/intent_backlog new file mode 100755 index 0000000..8722659 --- /dev/null +++ b/bin/intent_backlog @@ -0,0 +1,450 @@ +#!/bin/bash +# intent_backlog - Wrapper for Backlog.md task management integration +# Usage: intent backlog <command> [options] [arguments] +# Also available as: intent bl <command> [options] [arguments] + +# Exit on error +set -e + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Get script directory first +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Get INTENT_HOME from environment or determine from script location +if [ -z "$INTENT_HOME" ]; then + export INTENT_HOME="$(cd "$SCRIPT_DIR/.." && pwd)" +fi + +# Source configuration helpers +source "$SCRIPT_DIR/intent_config" + +# Load configuration +load_intent_config + +# Check if backlog is installed +if ! 
command -v backlog &> /dev/null; then + echo "Error: Backlog.md is not installed" >&2 + echo "" >&2 + cat "$INTENT_HOME/bin/.help/backlog-install.help.md" >&2 + exit 1 +fi + +# Valid backlog statuses +VALID_BACKLOG_STATUSES=("todo" "wip" "done" "cancelled" "archived") + +# Function to validate backlog status +validate_backlog_status() { + local status="$1" + for valid_status in "${VALID_BACKLOG_STATUSES[@]}"; do + if [ "$status" = "$valid_status" ]; then + return 0 + fi + done + return 1 +} + +# Function to display usage +usage() { + echo "Usage: intent backlog <command> [options] [arguments]" + echo " intent bl <command> [options] [arguments]" + echo "" + echo "Intent wrapper for Backlog.md task management" + echo "" + echo "Commands:" + echo " init Initialize backlog in current project" + echo " task <subcommand> Task management (create, list, edit, etc.)" + echo " task pad <id|--all> [--size n] Zero-pad task IDs retroactively" + echo " list List tasks (uses backlog_list_status filter from config)" + echo " create <ST####> <title> Create a task linked to a steel thread" + echo " board Display tasks in Kanban board" + echo " config Manage backlog configuration" + echo " browser Open browser interface" + echo "" + echo "This wrapper:" + echo " - Automatically uses --plain for list commands to avoid git errors" + echo " - Disables remote operations for local projects" + echo " - Provides shortcuts for common Intent workflows" + echo "" + echo "Examples:" + echo " intent bl list # List tasks matching backlog_list_status" + echo " intent bl list --all # List all tasks regardless of status" + echo " intent bl list -s todo # List only todo tasks" + echo " intent bl create ST0014 \"Fix bug\" # Create task linked to ST0014" + echo " intent bl task edit task-5 # Edit a specific task" + echo " intent bl task pad task-9 --size 3 # Pad task-9 to task-009" + echo " intent bl task pad --all --size 3 # Pad all tasks to 3 digits" + echo " intent bl task pad --all # Pad all tasks 
using configured size" + echo "" + echo "For full backlog documentation, run: backlog help" +} + +# Initialize backlog with STP-friendly defaults +init_backlog() { + # Run backlog init + backlog init "$@" + + # Configure for local use + if [ -f "backlog/config.yml" ]; then + echo "Configuring backlog for Intent integration..." + # Disable remote operations to prevent git fetch errors + backlog config set remoteOperations false >/dev/null 2>&1 || true + # Set default status to match Intent conventions + backlog config set defaultStatus "To Do" >/dev/null 2>&1 || true + echo "Backlog configured for local Intent use." + fi +} + +# Create a task with Intent conventions +create_task() { + local st_id="$1" + local title="$2" + + if [ -z "$st_id" ] || [ -z "$title" ]; then + error "Usage: intent bl create <ST####> <title>" + fi + + # Validate steel thread ID format + if ! echo "$st_id" | grep -qE '^ST[0-9]{4}$'; then + error "Invalid steel thread ID format. Expected: ST####" + fi + + # Create the task with full title + local full_title="$st_id - $title" + backlog task create "$full_title" +} + +# Pad task IDs with zeros +pad_tasks() { + local task_id="" + local size="" + local all_tasks=false + + # Parse arguments + while [ $# -gt 0 ]; do + case "$1" in + --all) + all_tasks=true + shift + ;; + --size) + if [ -z "$2" ] || ! [[ "$2" =~ ^[0-9]+$ ]]; then + error "Invalid --size value. Must be a positive number." 
+ fi + size="$2" + shift 2 + ;; + *) + if [ -z "$task_id" ] && [[ "$1" =~ ^task-[0-9]+$ ]]; then + task_id="$1" + shift + else + error "Invalid argument: $1" + fi + ;; + esac + done + + # If no size specified, try to get it from backlog config + if [ -z "$size" ]; then + # Check if backlog is configured with zeroPaddedIds + if command -v backlog &> /dev/null && [ -f "backlog/config.yml" ]; then + local configured_size=$(backlog config get zeroPaddedIds 2>/dev/null | grep -E '^[0-9]+$' || echo "") + if [ -n "$configured_size" ]; then + size="$configured_size" + echo "Using configured zero padding size: $size" + else + error "No --size specified and no zeroPaddedIds configured in backlog" + fi + else + error "No --size specified and backlog not configured" + fi + fi + + if [ "$all_tasks" = true ] && [ -n "$task_id" ]; then + error "Cannot specify both a task ID and --all" + fi + + if [ "$all_tasks" = false ] && [ -z "$task_id" ]; then + error "Must specify either a task ID or --all" + fi + + # Process tasks + echo "Padding tasks to $size digits..." + + # Initialize counters + local tasks_updated=0 + local tasks_already_padded=0 + local tasks_errors=0 + local archive_updated=0 + local archive_already_padded=0 + local archive_errors=0 + + # Process tasks in a directory + process_directory() { + local dir="$1" + local count_var_prefix="$2" + + if [ ! 
-d "$dir" ]; then + return + fi + + # Find all task files + for file in "$dir"/task-*; do + if [ -f "$file" ]; then + local filename=$(basename "$file") + + # Extract task number and rest of filename + if [[ $filename =~ ^task-([0-9]+)(.*)$ ]]; then + local task_num="${BASH_REMATCH[1]}" + local rest="${BASH_REMATCH[2]}" + + # Check if this is the specific task we're looking for (single task mode) + if [ "$all_tasks" = false ] && [ "task-$task_num" != "$task_id" ]; then + continue + fi + + # Pad the number (force base 10 to handle leading zeros) + local padded_num=$(printf "%0${size}d" $((10#$task_num))) + + # Only process if padding is needed + local old_filename="$filename" + local new_filename="task-${padded_num}${rest}" + local old_path="$dir/$old_filename" + local new_path="$dir/$new_filename" + + if [ "$old_filename" != "$new_filename" ]; then + echo "Padding: $old_filename -> $new_filename" + + # Rename the file + if ! mv "$old_path" "$new_path"; then + echo "Error: Failed to rename $old_path" >&2 + eval "${count_var_prefix}_errors=\$((${count_var_prefix}_errors + 1))" + continue + fi + + # Update the id field in the file + local task_num_no_zeros=$((10#$task_num)) + if grep -q "^id: task-${task_num_no_zeros}$" "$new_path"; then + if ! 
sed -i.bak "s/^id: task-${task_num_no_zeros}$/id: task-${padded_num}/" "$new_path"; then + echo "Error: Failed to update ID in $new_path" >&2 + # Try to restore the original filename + mv "$new_path" "$old_path" 2>/dev/null + eval "${count_var_prefix}_errors=\$((${count_var_prefix}_errors + 1))" + continue + fi + rm -f "${new_path}.bak" + fi + + eval "${count_var_prefix}_updated=\$((${count_var_prefix}_updated + 1))" + else + eval "${count_var_prefix}_already_padded=\$((${count_var_prefix}_already_padded + 1))" + fi + fi + fi + done + } + + # Process main tasks directory + process_directory "backlog/tasks" "tasks" + + # Process archive tasks directory + process_directory "backlog/archive/tasks" "archive" + + # Calculate totals + local total_updated=$((tasks_updated + archive_updated)) + local total_already_padded=$((tasks_already_padded + archive_already_padded)) + local total_errors=$((tasks_errors + archive_errors)) + + # Report results + if [ "$all_tasks" = true ]; then + echo "Processed backlog/tasks/: $tasks_updated files updated, $tasks_already_padded already padded" + echo "Processed backlog/archive/tasks/: $archive_updated files updated, $archive_already_padded already padded" + echo "Total: $total_updated tasks updated" + else + if [ $total_updated -eq 1 ]; then + echo "Successfully padded task" + elif [ $total_already_padded -gt 0 ]; then + echo "Task '$task_id' is already padded to $size digits" + else + echo "Error: Task '$task_id' not found" + exit 1 + fi + fi + + if [ $total_errors -gt 0 ]; then + echo "Warning: $total_errors errors occurred during processing" >&2 + fi + + # Suggest setting zeroPaddedIds if needed + if [ $total_updated -gt 0 ]; then + echo "" + echo "Note: To ensure new tasks use the same padding, run:" + echo " intent bl config set zeroPaddedIds $size" + fi +} + +# Process commands +case "${1:-}" in + init) + shift + init_backlog "$@" + ;; + + list) + # Shortcut for 'task list --plain' + shift + + # Get default status from config + 
default_status="${BACKLOG_LIST_STATUS:-}" + + # If config has a default status, validate it + if [ -n "$default_status" ]; then + if ! validate_backlog_status "$default_status"; then + echo "Warning: Invalid backlog_list_status '$default_status' in config. Valid statuses are: ${VALID_BACKLOG_STATUSES[*]}" >&2 + default_status="" + fi + fi + + # Check if user provided -s option or --all + has_status_filter=false + show_all=false + for arg in "$@"; do + if [ "$arg" = "-s" ] || [ "$arg" = "--status" ]; then + has_status_filter=true + elif [ "$arg" = "--all" ]; then + show_all=true + fi + done + + # Build command + cmd_args=() + + # Add --plain if not already present + if ! echo "$@" | grep -q -- "--plain"; then + cmd_args+=("--plain") + fi + + # Add default status filter if no status filter provided and we have a default + # But skip if --all is provided + if [ "$has_status_filter" = false ] && [ "$show_all" = false ] && [ -n "$default_status" ]; then + cmd_args+=("-s" "$default_status") + fi + + # Add all user arguments except --all + for arg in "$@"; do + if [ "$arg" != "--all" ]; then + cmd_args+=("$arg") + fi + done + + # Execute backlog command + backlog task list "${cmd_args[@]}" + ;; + + create) + # Special STP create command + shift + create_task "$@" + ;; + + board) + shift + # Pass through board command without modification + # Note: board doesn't support --plain option + backlog board "$@" + ;; + + task|tasks) + # Handle task subcommands + subcommand="${2:-}" + case "$subcommand" in + list) + # Add --plain to task list + shift 2 + + # Get default status from config + default_status="${BACKLOG_LIST_STATUS:-}" + + # If config has a default status, validate it + if [ -n "$default_status" ]; then + if ! validate_backlog_status "$default_status"; then + echo "Warning: Invalid backlog_list_status '$default_status' in config. 
Valid statuses are: ${VALID_BACKLOG_STATUSES[*]}" >&2 + default_status="" + fi + fi + + # Check if user provided -s option or --all + has_status_filter=false + show_all=false + for arg in "$@"; do + if [ "$arg" = "-s" ] || [ "$arg" = "--status" ]; then + has_status_filter=true + elif [ "$arg" = "--all" ]; then + show_all=true + fi + done + + # Build command + cmd_args=() + + # Add --plain if not already present + if ! echo "$@" | grep -q -- "--plain"; then + cmd_args+=("--plain") + fi + + # Add default status filter if no status filter provided and we have a default + # But skip if --all is provided + if [ "$has_status_filter" = false ] && [ "$show_all" = false ] && [ -n "$default_status" ]; then + cmd_args+=("-s" "$default_status") + fi + + # Add all user arguments except --all + for arg in "$@"; do + if [ "$arg" != "--all" ]; then + cmd_args+=("$arg") + fi + done + + # Execute backlog command + backlog task list "${cmd_args[@]}" + ;; + pad) + # Handle task padding + shift 2 # Remove 'task' and 'pad' + pad_tasks "$@" + ;; + *) + # Pass through other task commands + shift + backlog task "$@" + ;; + esac + ;; + + config) + # Pass through config commands + shift + backlog config "$@" + ;; + + browser) + # Pass through browser command + shift + backlog browser "$@" + ;; + + -h|--help|help|"") + usage + exit 0 + ;; + + *) + # Pass through any other commands + backlog "$@" + ;; +esac \ No newline at end of file diff --git a/bin/intent_bl b/bin/intent_bl new file mode 100755 index 0000000..8a4e24c --- /dev/null +++ b/bin/intent_bl @@ -0,0 +1,9 @@ +#!/bin/bash +# intent_bl - Shorthand alias for intent_backlog +# This simply calls intent_backlog with all arguments + +# Get the directory where this script is located +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Call intent_backlog with all arguments +exec "$SCRIPT_DIR/intent_backlog" "$@" \ No newline at end of file diff --git a/bin/intent_bootstrap b/bin/intent_bootstrap new file mode 100755 index 
0000000..cbe450a --- /dev/null +++ b/bin/intent_bootstrap @@ -0,0 +1,168 @@ +#!/bin/bash +# intent_bootstrap - Initial setup for Intent v2.0.0 installations + +# Exit on error +set -e + +# Function to display usage +usage() { + cat << EOF +Usage: intent_bootstrap [OPTIONS] + +Initial setup for Intent v2.0.0. Creates global configuration and provides +setup instructions. + +Options: + -h, --help Show this help message + -f, --force Force recreation of config even if it exists + -q, --quiet Suppress informational output + +Examples: + intent_bootstrap # Standard setup + intent_bootstrap --force # Recreate configuration + +EOF + exit 0 +} + +# Parse command line arguments +FORCE=false +QUIET=false + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + usage + ;; + -f|--force) + FORCE=true + shift + ;; + -q|--quiet) + QUIET=true + shift + ;; + *) + echo "Unknown option: $1" >&2 + echo "Use --help for usage information" >&2 + exit 1 + ;; + esac +done + +# Function to print unless quiet mode +info() { + if [ "$QUIET" != true ]; then + echo "$@" + fi +} + +# Main bootstrap function +bootstrap_intent() { + if [ "$QUIET" != true ]; then + echo "Intent Bootstrap v2.0.0" + echo "=======================" + echo "" + fi + + # 1. Detect or validate INTENT_HOME + if [ -z "$INTENT_HOME" ]; then + info "INTENT_HOME not set, detecting installation directory..." 
+ + # Get the directory where this script is located + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + + # Check if we're in a valid intent installation + if [ -f "$SCRIPT_DIR/intent" ] || [ -f "$SCRIPT_DIR/intent_bootstrap" ]; then + # We're in the bin directory + INTENT_HOME="$(dirname "$SCRIPT_DIR")" + info "Found intent installation at: $INTENT_HOME" + else + # Try to find intent installation by crawling up + local current_dir="$SCRIPT_DIR" + while [ "$current_dir" != "/" ]; do + if [ -f "$current_dir/bin/intent" ] || [ -f "$current_dir/bin/intent_bootstrap" ]; then + INTENT_HOME="$current_dir" + info "Found intent installation at: $INTENT_HOME" + break + fi + current_dir=$(dirname "$current_dir") + done + fi + + if [ -z "$INTENT_HOME" ]; then + echo "ERROR: Could not detect intent installation directory" >&2 + echo "Please set INTENT_HOME and run bootstrap again" >&2 + exit 1 + fi + fi + + # 2. Validate installation + if [ ! -d "$INTENT_HOME/bin" ]; then + echo "ERROR: Invalid INTENT_HOME - bin directory not found at $INTENT_HOME/bin" >&2 + exit 1 + fi + + # 3. Create global config directory + info "Creating global config directory..." + mkdir -p "$HOME/.config/intent" + + # 4. Generate initial global config if it doesn't exist + local config_file="$HOME/.config/intent/config.json" + + if [ -f "$config_file" ] && [ "$FORCE" != true ]; then + info "Global configuration already exists at $config_file" + info "Use --force to recreate it" + else + if [ -f "$config_file" ] && [ "$FORCE" = true ]; then + info "Backing up existing configuration..." + cp "$config_file" "$config_file.bak.$(date +%Y%m%d_%H%M%S)" + fi + + info "Creating default global configuration..." + cat > "$config_file" << EOF +{ + "intent_version": "2.1.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "${USER}", + "editor": "${EDITOR:-vim}" +} +EOF + + if [ $? 
-eq 0 ]; then + info "Configuration created successfully at $config_file" + else + echo "ERROR: Failed to create configuration file" >&2 + exit 1 + fi + fi + + # 5. PATH setup recommendations + if [ "$QUIET" != true ]; then + echo "" + echo "Setup complete! Add the following to your shell configuration:" + echo "" + echo " export INTENT_HOME=\"$INTENT_HOME\"" + echo " export PATH=\"\$INTENT_HOME/bin:\$PATH\"" + echo "" + echo "For bash, add to ~/.bashrc or ~/.bash_profile" + echo "For zsh, add to ~/.zshrc" + echo "" + fi + + # 6. Run doctor to verify (only if doctor exists) + if [ -f "$INTENT_HOME/bin/intent_doctor" ] && [ -x "$INTENT_HOME/bin/intent_doctor" ]; then + if [ "$QUIET" != true ]; then + echo "Running intent doctor to verify installation..." + echo "" + fi + "$INTENT_HOME/bin/intent_doctor" + else + info "Note: intent_doctor not found, skipping verification" + info "Run 'intent doctor' after completing setup to verify installation" + fi +} + +# Run the bootstrap +bootstrap_intent \ No newline at end of file diff --git a/bin/intent_config b/bin/intent_config new file mode 100755 index 0000000..b2d4c59 --- /dev/null +++ b/bin/intent_config @@ -0,0 +1,226 @@ +#!/bin/bash +# intent_config - Shared configuration library for Intent +# Provides JSON parsing, config loading, and common functions + +# Source helpers for version function if available +if [ -z "$INTENT_HOME" ]; then + INTENT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +fi +if [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" +fi + +# Get version from centralized source +INTENT_VERSION="$(get_intent_version 2>/dev/null || echo "2.2.1")" + +# Default values +DEFAULT_INTENT_DIR="intent" +DEFAULT_BACKLOG_DIR="backlog" + +# Parse JSON file using jq +parse_json() { + local file=$1 + local prefix=$2 + + # Check if file exists + if [ ! -f "$file" ]; then + return 1 + fi + + # Check if jq is available + if ! 
command -v jq >/dev/null 2>&1; then + echo "Error: jq is required but not installed." >&2 + echo "" >&2 + echo "Intent requires jq for JSON configuration parsing." >&2 + echo "" >&2 + echo "Installation instructions:" >&2 + if [ "$(uname)" = "Darwin" ]; then + echo " macOS: brew install jq" >&2 + elif [ "$(uname)" = "Linux" ]; then + echo " Debian/Ubuntu: sudo apt-get install jq" >&2 + echo " RedHat/CentOS: sudo yum install jq" >&2 + echo " Arch Linux: sudo pacman -S jq" >&2 + else + echo " Please install jq using your system's package manager" >&2 + fi + echo "" >&2 + echo "For more information: https://stedolan.github.io/jq/download/" >&2 + exit 1 + fi + + # Extract all key-value pairs as shell variable assignments + jq -r 'to_entries | .[] | "\(.key)=\"\(.value)\""' "$file" 2>/dev/null | \ + sed -e "s/^/${prefix}/" +} + +# Find project root by looking for intent/stp markers +find_project_root() { + local current_dir=$(pwd) + + while [ "$current_dir" != "/" ]; do + # Check for v2.0.0 structure + if [ -f "$current_dir/.intent/config.json" ]; then + echo "$current_dir" + return 0 + fi + + # Check for legacy structures + if [ -d "$current_dir/stp/.config" ] || [ -f "$current_dir/.stp-config" ]; then + echo "$current_dir" + return 0 + fi + + # Check for directory-based or file-based steel threads + if [ -d "$current_dir/stp/prj/st" ]; then + echo "$current_dir" + return 0 + fi + + current_dir=$(dirname "$current_dir") + done + + # No project root found + return 1 +} + +# Load Intent configuration with hierarchy +load_intent_config() { + # Initialize defaults (INTENT_VERSION already set above) + INTENT_DIR="${DEFAULT_INTENT_DIR}" + BACKLOG_DIR="${DEFAULT_BACKLOG_DIR}" + AUTHOR="${USER}" + EDITOR="${EDITOR:-vim}" + + # Find project root (ignore exit code to prevent set -e from exiting) + PROJECT_ROOT=$(find_project_root || true) + + # Load global config (XDG standard location) + if [ -f "$HOME/.config/intent/config.json" ]; then + local global_config + 
global_config=$(parse_json "$HOME/.config/intent/config.json" "global_") + if [ $? -eq 0 ]; then + eval "$global_config" + [ -n "$global_intent_dir" ] && INTENT_DIR="$global_intent_dir" + [ -n "$global_backlog_dir" ] && BACKLOG_DIR="$global_backlog_dir" + [ -n "$global_author" ] && AUTHOR="$global_author" + [ -n "$global_editor" ] && EDITOR="$global_editor" + [ -n "$global_backlog_list_status" ] && BACKLOG_LIST_STATUS="$global_backlog_list_status" + fi + fi + + # Load local config (overrides global) + if [ -n "$PROJECT_ROOT" ] && [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + local local_config + local_config=$(parse_json "$PROJECT_ROOT/.intent/config.json" "local_") + if [ $? -eq 0 ]; then + eval "$local_config" + [ -n "$local_intent_dir" ] && INTENT_DIR="$local_intent_dir" + [ -n "$local_backlog_dir" ] && BACKLOG_DIR="$local_backlog_dir" + [ -n "$local_author" ] && AUTHOR="$local_author" + [ -n "$local_editor" ] && EDITOR="$local_editor" + [ -n "$local_intent_version" ] && INTENT_VERSION="$local_intent_version" + [ -n "$local_backlog_list_status" ] && BACKLOG_LIST_STATUS="$local_backlog_list_status" + fi + fi + + # Environment variables override all + [ -n "$INTENT_DIR_OVERRIDE" ] && INTENT_DIR="$INTENT_DIR_OVERRIDE" + [ -n "$BACKLOG_DIR_OVERRIDE" ] && BACKLOG_DIR="$BACKLOG_DIR_OVERRIDE" + + # Legacy support: check for stp directory if intent doesn't exist + if [ -n "$PROJECT_ROOT" ]; then + if [ ! -d "$PROJECT_ROOT/$INTENT_DIR" ] && [ -d "$PROJECT_ROOT/stp" ]; then + INTENT_DIR="stp" + fi + fi + + # Export for use in subcommands + export INTENT_VERSION INTENT_DIR BACKLOG_DIR AUTHOR EDITOR PROJECT_ROOT BACKLOG_LIST_STATUS +} + +# Validate JSON syntax using jq +validate_json() { + local file=$1 + + if [ ! -f "$file" ]; then + return 1 + fi + + # Check if jq is available + if ! command -v jq >/dev/null 2>&1; then + return 1 + fi + + # Use jq to validate JSON syntax + if jq . 
"$file" >/dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +# Display configuration (for debugging) +show_config() { + echo "Intent Configuration:" + echo " INTENT_VERSION: $INTENT_VERSION" + echo " PROJECT_ROOT: ${PROJECT_ROOT:-<not in project>}" + echo " INTENT_DIR: $INTENT_DIR" + echo " BACKLOG_DIR: $BACKLOG_DIR" + echo " AUTHOR: $AUTHOR" + echo " EDITOR: $EDITOR" +} + +# Create default global config +create_default_global_config() { + local config_dir="$HOME/.config/intent" + local config_file="$config_dir/config.json" + + mkdir -p "$config_dir" + + cat > "$config_file" << EOF +{ + "intent_version": "2.1.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "${USER}", + "editor": "${EDITOR:-vim}" +} +EOF + + return $? +} + +# Determine if running in legacy mode +is_legacy_project() { + if [ -n "$PROJECT_ROOT" ]; then + # Check for old structures + if [ -f "$PROJECT_ROOT/.stp-config" ] || [ -d "$PROJECT_ROOT/stp" ]; then + if [ ! -f "$PROJECT_ROOT/.intent/config.json" ]; then + return 0 + fi + fi + fi + return 1 +} + +# Common error handling +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Common warning handling +warning() { + echo "Warning: $1" >&2 +} + +# Export functions for use by other scripts +export -f parse_json +export -f find_project_root +export -f load_intent_config +export -f validate_json +export -f show_config +export -f create_default_global_config +export -f is_legacy_project +export -f error +export -f warning \ No newline at end of file diff --git a/bin/intent_doctor b/bin/intent_doctor new file mode 100755 index 0000000..8ac4e52 --- /dev/null +++ b/bin/intent_doctor @@ -0,0 +1,626 @@ +#!/bin/bash +# intent_doctor - Configuration diagnostics and fixes for Intent v2.1.0 + +# Source the config library if we can find it +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +if [ -f "$SCRIPT_DIR/intent_config" ]; then + source "$SCRIPT_DIR/intent_config" + # Load the configuration to set PROJECT_ROOT and other 
variables + load_intent_config +elif [ -n "$INTENT_HOME" ] && [ -f "$INTENT_HOME/bin/intent_config" ]; then + source "$INTENT_HOME/bin/intent_config" + # Load the configuration to set PROJECT_ROOT and other variables + load_intent_config +else + # Fallback - define minimal functions we need + validate_json() { + local file=$1 + if [ ! -f "$file" ]; then + return 1 + fi + # Use jq if available + if command -v jq >/dev/null 2>&1; then + jq . "$file" >/dev/null 2>&1 + return $? + fi + # If no jq, just check file exists + return 0 + } +fi + +# Function to display usage +usage() { + cat << EOF +Usage: intent_doctor [OPTIONS] + +Diagnose and fix common Intent configuration issues. + +Options: + -h, --help Show this help message + -f, --fix Attempt to fix issues automatically + -v, --verbose Show detailed information + -q, --quiet Only show errors and warnings + +Examples: + intent_doctor # Check for issues + intent_doctor --fix # Fix issues automatically + +EOF + exit 0 +} + +# Parse command line arguments +FIX_MODE=false +VERBOSE=false +QUIET=false + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + usage + ;; + -f|--fix) + FIX_MODE=true + shift + ;; + -v|--verbose) + VERBOSE=true + shift + ;; + -q|--quiet) + QUIET=true + shift + ;; + *) + echo "Unknown option: $1" >&2 + echo "Use --help for usage information" >&2 + exit 1 + ;; + esac +done + +# Counters for issues +ERRORS=0 +WARNINGS=0 +FIXED=0 + +# Color codes (disabled if not terminal or if quiet) +if [ -t 1 ] && [ "$QUIET" != true ]; then + RED='\033[0;31m' + YELLOW='\033[0;33m' + GREEN='\033[0;32m' + NC='\033[0m' # No Color +else + RED='' + YELLOW='' + GREEN='' + NC='' +fi + +# Status display functions +show_ok() { + if [ "$QUIET" != true ]; then + echo -e "${GREEN}OK${NC}" + fi +} + +show_error() { + echo -e "${RED}ERROR${NC}: $1" + ((ERRORS++)) +} + +show_warning() { + echo -e "${YELLOW}WARNING${NC}: $1" + ((WARNINGS++)) +} + +show_fixed() { + if [ "$FIX_MODE" = true ]; then + echo -e " ${GREEN}FIXED${NC}: 
$1" + ((FIXED++)) + fi +} + +info() { + if [ "$QUIET" != true ]; then + echo "$@" + fi +} + +verbose() { + if [ "$VERBOSE" = true ] && [ "$QUIET" != true ]; then + echo " $@" + fi +} + +# Main doctor function +doctor_check() { + # Get version from helpers if available + local version="2.2.1" + if [ -n "$INTENT_HOME" ] && [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" + version="$(get_intent_version 2>/dev/null || echo "2.2.1")" + fi + + if [ "$QUIET" != true ]; then + echo "Intent Doctor v$version" + echo "====================" + echo "" + fi + + # Check 1: INTENT_HOME + if [ "$QUIET" != true ]; then + echo -n "Checking INTENT_HOME... " + fi + + if [ -z "$INTENT_HOME" ]; then + show_error "Not set" + if [ "$FIX_MODE" = true ]; then + echo " FIX: Please run 'intent_bootstrap' to set up INTENT_HOME" + echo " Or set it manually: export INTENT_HOME=/path/to/intent" + fi + elif [ ! -d "$INTENT_HOME" ]; then + show_error "Directory does not exist: $INTENT_HOME" + else + show_ok + verbose "INTENT_HOME=$INTENT_HOME" + fi + + # Check 2: Intent executable + if [ "$QUIET" != true ]; then + echo -n "Checking intent executable... " + fi + + INTENT_FOUND=false + if [ -n "$INTENT_HOME" ]; then + if [ -f "$INTENT_HOME/bin/intent" ] && [ -x "$INTENT_HOME/bin/intent" ]; then + INTENT_FOUND=true + show_ok + verbose "Found at $INTENT_HOME/bin/intent" + elif [ -f "$INTENT_HOME/bin/intent_bootstrap" ]; then + # Bootstrap exists but main intent doesn't + show_warning "Not found (bootstrap exists)" + verbose "This is expected during initial setup" + else + show_error "Not found or not executable" + fi + else + show_error "Cannot check (INTENT_HOME not set)" + fi + + # Check 3: Global config + if [ "$QUIET" != true ]; then + echo -n "Checking global config... 
" + fi + + GLOBAL_CONFIG="$HOME/.config/intent/config.json" + if [ -f "$GLOBAL_CONFIG" ]; then + if validate_json "$GLOBAL_CONFIG"; then + show_ok + verbose "Found at $GLOBAL_CONFIG" + + # Check for required fields if verbose + if [ "$VERBOSE" = true ] && command -v jq >/dev/null 2>&1; then + if jq -e '.intent_version' "$GLOBAL_CONFIG" >/dev/null 2>&1; then + verbose "Has intent_version field" + else + verbose "Missing intent_version field" + fi + fi + else + show_error "Invalid JSON syntax" + if [ "$FIX_MODE" = true ]; then + echo " Backing up invalid config..." + mv "$GLOBAL_CONFIG" "$GLOBAL_CONFIG.bak.$(date +%Y%m%d_%H%M%S)" + if [ -f "$INTENT_HOME/bin/intent_bootstrap" ]; then + echo " Running bootstrap to create new config..." + "$INTENT_HOME/bin/intent_bootstrap" --quiet + show_fixed "Created new global config" + else + echo " FIX: Run 'intent_bootstrap' to create new config" + fi + fi + fi + else + show_warning "Not found" + if [ "$FIX_MODE" = true ]; then + if [ -f "$INTENT_HOME/bin/intent_bootstrap" ]; then + echo " Running bootstrap to create config..." + "$INTENT_HOME/bin/intent_bootstrap" --quiet + show_fixed "Created global config" + else + echo " FIX: Run 'intent_bootstrap' to create config" + fi + else + verbose "Run 'intent_bootstrap' to create global config" + fi + fi + + # Check 4: Local config (if in a project) + if [ -n "$PROJECT_ROOT" ]; then + if [ "$QUIET" != true ]; then + echo -n "Checking local config... 
" + fi + + LOCAL_CONFIG="$PROJECT_ROOT/.intent/config.json" + if [ -f "$LOCAL_CONFIG" ]; then + if validate_json "$LOCAL_CONFIG"; then + show_ok + verbose "Found at $LOCAL_CONFIG" + else + show_error "Invalid JSON syntax" + if [ "$FIX_MODE" = true ]; then + echo " FIX: Please fix JSON syntax in $LOCAL_CONFIG" + echo " Or remove it to use global config only" + fi + fi + else + # Check if this is a legacy project + if [ -f "$PROJECT_ROOT/.stp-config" ] || [ -d "$PROJECT_ROOT/stp" ]; then + show_warning "Legacy project without intent config" + verbose "Run 'intent upgrade' to migrate to v2.1.0" + else + info "Not found (using global config)" + fi + fi + else + verbose "Not in a project directory" + fi + + # Check 4b: v2.3.0 features (if in a project with v2.3.0+) + if [ -n "$PROJECT_ROOT" ] && [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + # Check version + local project_version=$(jq -r '.version // "0.0.0"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null) + if [[ "$project_version" == "2.3"* ]]; then + # Check for AGENTS.md + if [ "$QUIET" != true ]; then + echo -n "Checking AGENTS.md... " + fi + if [ -f "$PROJECT_ROOT/AGENTS.md" ] || [ -f "$PROJECT_ROOT/intent/llm/AGENTS.md" ]; then + show_ok + verbose "AGENTS.md found" + else + show_warning "Missing AGENTS.md" + verbose "Run 'intent agents init' to create AGENTS.md" + if [ "$FIX_MODE" = true ]; then + echo " Creating AGENTS.md..." + (cd "$PROJECT_ROOT" && intent agents init) + show_fixed "Created AGENTS.md" + fi + fi + + # Check for plugins config + if [ "$QUIET" != true ]; then + echo -n "Checking plugins config... " + fi + if jq -e '.plugins' "$PROJECT_ROOT/.intent/config.json" >/dev/null 2>&1; then + show_ok + verbose "Plugins configured" + else + show_warning "Missing plugins configuration" + verbose "Configuration may be incomplete from upgrade" + if [ "$FIX_MODE" = true ]; then + echo " Adding plugins configuration..." 
+ local temp_file=$(mktemp) + jq '.plugins = {"claude": {"subagents_path": "intent/plugins/claude/subagents"}, "agents": {"config_path": "intent/plugins/agents"}}' "$PROJECT_ROOT/.intent/config.json" > "$temp_file" + mv "$temp_file" "$PROJECT_ROOT/.intent/config.json" + show_fixed "Added plugins configuration" + fi + fi + fi + fi + + # Check 5: PATH + if [ "$QUIET" != true ]; then + echo -n "Checking PATH... " + fi + + if [ -n "$INTENT_HOME" ]; then + if echo "$PATH" | grep -q "$INTENT_HOME/bin"; then + show_ok + verbose "$INTENT_HOME/bin is in PATH" + else + show_warning "$INTENT_HOME/bin not in PATH" + if [ "$FIX_MODE" = true ]; then + echo " FIX: Add to your shell configuration:" + echo " export PATH=\"\$INTENT_HOME/bin:\$PATH\"" + fi + fi + else + show_warning "Cannot check (INTENT_HOME not set)" + fi + + # Check 6: Required tools + if [ "$QUIET" != true ]; then + echo -n "Checking required tools... " + fi + + # Define tool categories + REQUIRED_TOOLS="bash sed grep mkdir" + CRITICAL_TOOLS="jq" # Separate jq for special handling + CORE_TOOLS="git cat echo pwd dirname basename date cut tr awk" + OPTIONAL_TOOLS="backlog bats" + + # Check required tools + MISSING_REQUIRED="" + for tool in $REQUIRED_TOOLS; do + if ! command -v "$tool" >/dev/null 2>&1; then + MISSING_REQUIRED="$MISSING_REQUIRED $tool" + fi + done + + # Check jq specifically (critical for Intent) + JQ_MISSING=false + if ! 
command -v jq >/dev/null 2>&1; then + JQ_MISSING=true + MISSING_REQUIRED="$MISSING_REQUIRED jq" + fi + + if [ -z "$MISSING_REQUIRED" ]; then + show_ok + verbose "All required tools found" + else + show_error "Missing required tools:$MISSING_REQUIRED" + echo "" + + # Platform-specific installation instructions + if [ "$(uname)" = "Darwin" ]; then + echo " Installation instructions for macOS:" + if [ "$JQ_MISSING" = true ]; then + echo " jq: brew install jq" + echo " (Required for JSON config parsing and agent management)" + fi + for tool in $MISSING_REQUIRED; do + if [ "$tool" != "jq" ]; then + echo " $tool: brew install $tool" + fi + done + elif [ "$(uname)" = "Linux" ]; then + echo " Installation instructions for Linux:" + if [ "$JQ_MISSING" = true ]; then + echo " jq: sudo apt-get install jq # Debian/Ubuntu" + echo " sudo yum install jq # RedHat/CentOS" + echo " sudo pacman -S jq # Arch" + echo " (Required for JSON config parsing and agent management)" + fi + for tool in $MISSING_REQUIRED; do + if [ "$tool" != "jq" ]; then + echo " $tool: Usually pre-installed, check your package manager" + fi + done + else + echo " Please install missing tools using your system's package manager" + if [ "$JQ_MISSING" = true ]; then + echo " jq is CRITICAL for Intent - configs and agents won't work without it!" + fi + fi + echo "" + fi + + # Check 7: Core tools (non-blocking warnings) + if [ "$VERBOSE" = true ]; then + if [ "$QUIET" != true ]; then + echo -n "Checking core tools... " + fi + + MISSING_CORE="" + for tool in $CORE_TOOLS; do + if ! 
command -v "$tool" >/dev/null 2>&1; then + MISSING_CORE="$MISSING_CORE $tool" + fi + done + + if [ -z "$MISSING_CORE" ]; then + show_ok + verbose "All core tools found" + else + show_warning "Missing core tools:$MISSING_CORE" + echo " These tools are used by Intent but may not be critical" + echo " Some features might not work without them" + fi + fi + + # Check 8: Optional tools + if [ "$QUIET" != true ]; then + echo -n "Checking optional tools... " + fi + + MISSING_OPTIONAL="" + OPTIONAL_STATUS="" + + # Check for backlog + if ! command -v backlog >/dev/null 2>&1; then + MISSING_OPTIONAL="$MISSING_OPTIONAL backlog" + OPTIONAL_STATUS="${OPTIONAL_STATUS}backlog:NOT_FOUND " + else + OPTIONAL_STATUS="${OPTIONAL_STATUS}backlog:OK " + fi + + # Check for bats (testing framework) + if ! command -v bats >/dev/null 2>&1; then + MISSING_OPTIONAL="$MISSING_OPTIONAL bats" + OPTIONAL_STATUS="${OPTIONAL_STATUS}bats:NOT_FOUND " + else + OPTIONAL_STATUS="${OPTIONAL_STATUS}bats:OK " + fi + + # Check for checksum tools (for agent management) + CHECKSUM_TOOL="" + if command -v sha256sum >/dev/null 2>&1; then + CHECKSUM_TOOL="sha256sum" + elif command -v shasum >/dev/null 2>&1; then + CHECKSUM_TOOL="shasum" + fi + + if [ -z "$CHECKSUM_TOOL" ]; then + MISSING_OPTIONAL="$MISSING_OPTIONAL checksum" + OPTIONAL_STATUS="${OPTIONAL_STATUS}checksum:NOT_FOUND " + else + OPTIONAL_STATUS="${OPTIONAL_STATUS}checksum:$CHECKSUM_TOOL " + fi + + if [ -z "$MISSING_OPTIONAL" ]; then + show_ok + verbose "All optional tools found: $OPTIONAL_STATUS" + else + if [ "$QUIET" != true ]; then + echo "Some missing" + verbose "Optional tools status: $OPTIONAL_STATUS" + fi + + # Provide installation instructions for optional tools + if echo "$MISSING_OPTIONAL" | grep -q "backlog"; then + verbose "Backlog.md: Task management system" + verbose " Install: npm install -g backlog-md" + verbose " More info: https://github.com/backlog-md/backlog-md" + fi + + if echo "$MISSING_OPTIONAL" | grep -q "bats"; then + verbose 
"BATS: Bash testing framework" + verbose " Install: npm install -g bats (or brew install bats-core)" + verbose " Used for: Running Intent test suite" + fi + + if echo "$MISSING_OPTIONAL" | grep -q "checksum"; then + verbose "Checksum tool: Required for agent integrity checks" + verbose " macOS: shasum is usually pre-installed" + verbose " Linux: Install coreutils package" + fi + fi + + # Check 9: File permissions + if [ -n "$INTENT_HOME" ] && [ "$VERBOSE" = true ]; then + if [ "$QUIET" != true ]; then + echo -n "Checking file permissions... " + fi + + PERM_ISSUES=false + for file in "$INTENT_HOME"/bin/*; do + if [ -f "$file" ] && [ ! -x "$file" ]; then + PERM_ISSUES=true + verbose "Not executable: $file" + fi + done + + if [ "$PERM_ISSUES" = false ]; then + show_ok + else + show_warning "Some files not executable" + if [ "$FIX_MODE" = true ]; then + chmod +x "$INTENT_HOME"/bin/* + show_fixed "Made all bin files executable" + fi + fi + fi + + # Check 10: Agent system + if [ "$QUIET" != true ]; then + echo -n "Checking agent system... 
" + fi + + # Check for new plugin structure (v2.3.0+) + if [ -d "$INTENT_HOME/intent/plugins/claude/subagents" ]; then + # New plugin-based structure + local subagent_count=0 + if [ -d "$INTENT_HOME/intent/plugins/claude/subagents" ]; then + subagent_count=$(ls -1 "$INTENT_HOME/intent/plugins/claude/subagents" 2>/dev/null | wc -l) + fi + + show_ok + verbose "Found $subagent_count available Claude subagents" + + # Check if Claude is available + if [ -d "$HOME/.claude" ]; then + # Check installed agents manifest + if [ -f "$HOME/.intent/agents/installed-agents.json" ]; then + if command -v jq >/dev/null 2>&1; then + local installed_count=$(jq -r '.agents | length' "$HOME/.intent/agents/installed-agents.json" 2>/dev/null || echo 0) + verbose "$installed_count Claude subagents installed" + fi + fi + else + verbose "Claude Code not detected - subagents not active" + fi + elif [ -d "$INTENT_HOME/agents" ]; then + # Legacy agent structure (pre-v2.3.0) + # Check manifest + if [ -f "$INTENT_HOME/agents/.manifest/global-agents.json" ]; then + # Check if jq is available for agent operations + if ! 
command -v jq >/dev/null 2>&1; then + show_warning "Cannot validate (jq not installed)" + echo " Agent system requires jq for JSON parsing" + echo " Install jq to enable agent management features" + else + # Validate manifest + if validate_json "$INTENT_HOME/agents/.manifest/global-agents.json"; then + AGENT_COUNT=$(jq -r '.agents | length' "$INTENT_HOME/agents/.manifest/global-agents.json" 2>/dev/null || echo 0) + show_ok + verbose "Found $AGENT_COUNT available agents (legacy structure)" + verbose "Consider upgrading Intent to v2.3.0+ for new plugin architecture" + + # Check if Claude is available + if [ -d "$HOME/.claude" ]; then + # Run quick agent status check + if command -v "$INTENT_HOME/bin/intent_agents" >/dev/null 2>&1; then + AGENT_STATUS=$("$INTENT_HOME/bin/intent_agents" status 2>&1 | grep -E "(OK|MISSING|ERROR)" | wc -l) + if [ "$AGENT_STATUS" -gt 0 ]; then + verbose "Installed agents detected (run 'intent agents status' for details)" + fi + fi + else + verbose "Claude Code not detected - agents not active" + fi + else + show_warning "Invalid agent manifest" + if [ "$FIX_MODE" = true ]; then + echo " FIX: Please reinstall Intent to fix agent manifest" + fi + fi + fi + else + show_warning "Agent manifest missing" + verbose "Expected: $INTENT_HOME/agents/.manifest/global-agents.json" + fi + else + if [ "$QUIET" != true ]; then + echo "Not available" + fi + verbose "Agents directory not found (may be older Intent version)" + fi + + # Summary + if [ "$QUIET" != true ]; then + echo "" + echo "Summary:" + echo " Errors: $ERRORS" + echo " Warnings: $WARNINGS" + if [ "$FIX_MODE" = true ]; then + echo " Fixed: $FIXED" + fi + echo "" + + if [ $ERRORS -eq 0 ] && [ $WARNINGS -eq 0 ]; then + echo -e "${GREEN}✓ All checks passed!${NC}" + elif [ $ERRORS -eq 0 ]; then + echo -e "${YELLOW}⚠ Some warnings found but no errors${NC}" + else + echo -e "${RED}✗ Issues found that need attention${NC}" + if [ "$FIX_MODE" != true ]; then + echo "" + echo "Run 'intent_doctor 
--fix' to attempt automatic fixes" + fi + fi + fi + + # Exit with appropriate code + if [ $ERRORS -gt 0 ]; then + exit 1 + elif [ $WARNINGS -gt 0 ]; then + exit 0 # Warnings don't cause failure + else + exit 0 + fi +} + +# Run the doctor check +doctor_check \ No newline at end of file diff --git a/bin/intent_fileindex b/bin/intent_fileindex new file mode 100755 index 0000000..7187635 --- /dev/null +++ b/bin/intent_fileindex @@ -0,0 +1,759 @@ +#!/bin/bash +# intent_fileindex - Create and manage file indexes with checkbox states +# Usage: intent fileindex [options] [startdir] [filespec] +# Can be run standalone or as part of an Intent project + +# Exit on error +set -e + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Initial values +IN_INTENT_PROJECT=false +PROJECT_ROOT="" +INTENT_VERSION="" +RECURSIVE=false +VERBOSE=false +OUTPUT_FILE="" +INDEX_FILE="" +INDEX_DIR_OVERRIDE="" +INTENT_DIR_OVERRIDE="" +NO_INTENT=false +PROCESSED_COUNT=0 +TOGGLE_FILE="" +TOGGLE_MODE=false +CHECK_FILE="" +CHECK_MODE=false +UNCHECK_FILE="" +UNCHECK_MODE=false + +# Function to find project root +find_project_root() { + local current_dir="$(pwd)" + while [ "$current_dir" != "/" ]; do + if [ -f "$current_dir/.intent/config.json" ]; then + echo "$current_dir" + return 0 + fi + current_dir="$(dirname "$current_dir")" + done + return 1 +} + +# Arrays to store file paths and their states separately +declare -a file_paths +declare -a file_check_states + +# Function to display usage +usage() { + echo "Usage: $(basename $0) [OPTIONS] [STARTDIR] [FILESPEC]" + echo "" + echo "Create and manage file indexes with checkbox states." 
+ echo "" + echo "OPTIONS:" + echo " -r Recurse through subdirectories" + echo " -v Verbose mode (show processing and summary)" + echo " -f FILE Output to file instead of stdout" + echo " --file FILE Output to file instead of stdout" + echo " -i FILE Use index file to maintain checked states" + echo " --index FILE Use index file to maintain checked states" + echo " -X FILE Toggle checked state of FILE in index" + echo " --toggle FILE Toggle checked state of FILE in index" + echo " -C FILE Set FILE to checked [x] state in index" + echo " --check FILE Set FILE to checked [x] state in index" + echo " -U FILE Set FILE to unchecked [ ] state in index" + echo " --uncheck FILE Set FILE to unchecked [ ] state in index" + echo " --index-dir DIR Default directory for index files" + echo " --intent-dir Specify Intent project directory" + echo " --no-intent Disable Intent integration" + echo " -h Show this help message" + echo "" + echo "DEFAULTS:" + echo " When in Intent project:" + echo " STARTDIR: lib/" + echo " FILESPEC: *.{ex,exs}" + echo " INDEX_DIR: .intent/indexes/" + echo " When standalone:" + echo " STARTDIR: . (current directory)" + echo " FILESPEC: *.{ex,exs}" + echo " INDEX_DIR: . 
(current directory)" + echo "" + echo "EXAMPLES:" + echo " # List all Elixir files recursively" + echo " $(basename $0) -r" + echo "" + echo " # Create an index file for tracking" + echo " $(basename $0) -r -i myproject.index" + echo "" + echo " # Search Python files in src/" + echo " $(basename $0) src '*.py'" + echo "" + echo " # Toggle file state in index" + echo " $(basename $0) -i project.index -X lib/app.ex" + exit 1 +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -r) + RECURSIVE=true + shift + ;; + -v) + VERBOSE=true + shift + ;; + -f|--file) + OUTPUT_FILE="$2" + shift 2 + ;; + -i|--index) + INDEX_FILE="$2" + shift 2 + ;; + --index-dir) + INDEX_DIR_OVERRIDE="$2" + shift 2 + ;; + --intent-dir) + INTENT_DIR_OVERRIDE="$2" + shift 2 + ;; + --no-intent) + NO_INTENT=true + shift + ;; + -X|--toggle) + TOGGLE_FILE="$2" + shift 2 + ;; + -C|--check) + CHECK_FILE="$2" + shift 2 + ;; + -U|--uncheck) + UNCHECK_FILE="$2" + shift 2 + ;; + -h|--help) + usage + ;; + -*) + echo "Unknown option: $1" + usage + ;; + *) + if [[ -z "${STARTDIR_SET:-}" ]]; then + STARTDIR="$1" + STARTDIR_SET=true + elif [[ -z "${FILESPEC_SET:-}" ]]; then + FILESPEC="$1" + FILESPEC_SET=true + else + echo "Too many arguments" + usage + fi + shift + ;; + esac +done + +# Detect Intent project context after parsing args +if [ "$NO_INTENT" = false ]; then + # Check if intent_config is available + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + if [ -f "$SCRIPT_DIR/intent_config" ]; then + # Source it but don't fail if not in project + source "$SCRIPT_DIR/intent_config" 2>/dev/null || true + fi + + # Apply intent dir override if specified + if [ -n "$INTENT_DIR_OVERRIDE" ]; then + # Check Intent project with override + if [ -f "$INTENT_DIR_OVERRIDE/.intent/config.json" ]; then + IN_INTENT_PROJECT=true + PROJECT_ROOT="$INTENT_DIR_OVERRIDE" + INTENT_VERSION=$(jq -r '.intent_version // "2.1.0"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null || echo "2.1.0") + fi + 
else + # Try to find project root + if PROJECT_ROOT=$(find_project_root 2>/dev/null); then + IN_INTENT_PROJECT=true + if [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + INTENT_VERSION=$(jq -r '.intent_version // "2.1.0"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null || echo "2.1.0") + fi + fi + fi +fi + +# Set defaults based on context +if [ "$IN_INTENT_PROJECT" = true ] && [ "$NO_INTENT" = false ]; then + # Intent project defaults - only use if not already set + : ${STARTDIR:="lib"} + : ${FILESPEC:="*.{ex,exs}"} + : ${INDEX_DIR:=".intent/indexes"} +else + # Standalone defaults - only use if not already set + : ${STARTDIR:="."} + : ${FILESPEC:="*.{ex,exs}"} + : ${INDEX_DIR:="."} +fi + +# Apply index dir override if specified +if [ -n "$INDEX_DIR_OVERRIDE" ]; then + INDEX_DIR="$INDEX_DIR_OVERRIDE" +fi + +# Handle index file path +if [ -n "$INDEX_FILE" ] && [[ "$INDEX_FILE" != /* ]] && [[ "$INDEX_FILE" != */* ]]; then + # Just a filename, prepend index directory + if [ "$IN_INTENT_PROJECT" = true ] && [ "$NO_INTENT" = false ]; then + mkdir -p "$PROJECT_ROOT/$INDEX_DIR" 2>/dev/null || true + INDEX_FILE="$PROJECT_ROOT/$INDEX_DIR/$INDEX_FILE" + else + INDEX_FILE="$INDEX_DIR/$INDEX_FILE" + fi +fi + +# Moved index file handling after Intent detection + +# Validate start directory +if [[ ! -d "$STARTDIR" ]]; then + # If we're in an Intent project and tried to use lib but it doesn't exist, + # fall back to current directory + if [ "$IN_INTENT_PROJECT" = true ] && [ "$NO_INTENT" = false ] && [ "$STARTDIR" = "lib" ]; then + STARTDIR="." 
+ else + echo "Error: Directory '$STARTDIR' does not exist" + exit 1 + fi +fi + +# Function to generate config JSON +generate_config_json() { + local timestamp + timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + local abs_startdir + abs_startdir=$(cd "$STARTDIR" && pwd) + local context="standalone" + + if [ "$IN_INTENT_PROJECT" = true ] && [ "$NO_INTENT" = false ]; then + context="intent_project" + fi + + cat << EOF +{ + "generator": "intent-fileindex", + "version": "1.1", + "timestamp": "$timestamp", + "context": "$context", + "config": { + "startdir": "$STARTDIR", + "absolute_startdir": "$abs_startdir", + "filespec": "$FILESPEC", + "recursive": $RECURSIVE, + "verbose": $VERBOSE, + "output_file": "${OUTPUT_FILE:-null}", + "index_file": "${INDEX_FILE:-null}", + "index_dir": "$INDEX_DIR" + } +EOF + + if [ "$IN_INTENT_PROJECT" = true ] && [ "$NO_INTENT" = false ]; then + cat << EOF + , + "intent": { + "project_root": "$PROJECT_ROOT", + "intent_version": "$INTENT_VERSION" + } +EOF + fi + + echo "}" +} + +# Function to find index of a file in arrays +find_file_index() { + local search_file="$1" + local i + for i in "${!file_paths[@]}"; do + if [[ "${file_paths[$i]}" == "$search_file" ]]; then + echo "$i" + return 0 + fi + done + return 1 +} + +# Function to add or update file state +set_file_state() { + local filepath="$1" + local state="$2" + + if idx=$(find_file_index "$filepath"); then + # Update existing + file_check_states[$idx]="$state" + else + # Add new + file_paths+=("$filepath") + file_check_states+=("$state") + fi +} + +# Function to get file state +get_file_state() { + local filepath="$1" + + if idx=$(find_file_index "$filepath"); then + echo "${file_check_states[$idx]}" + else + echo " " + fi +} + +# Function to read existing index file +read_index() { + if [[ -f "$INDEX_FILE" ]]; then + local in_json_block=false + local json_content="" + + while IFS= read -r line; do + # Detect start of JSON block + if [[ "$line" == "{" ]]; then + in_json_block=true + 
json_content="$line" + continue + fi + + # If we're in JSON block, accumulate content + if [[ "$in_json_block" == true ]]; then + json_content+=$'\n'"$line" + # Detect end of JSON block + if [[ "$line" == "}" ]]; then + in_json_block=false + if [[ "$VERBOSE" == true ]]; then + echo "Found existing config block in index" >&2 + fi + continue + fi + continue + fi + + # Skip empty lines + [[ -z "$line" ]] && continue + + # Parse file entries + if [[ "$line" =~ ^\[([ x])\]\ (.+)$ ]]; then + local state="${BASH_REMATCH[1]}" + local filepath="${BASH_REMATCH[2]}" + set_file_state "$filepath" "$state" + if [[ "$VERBOSE" == true ]]; then + echo "Loaded from index: [$state] $filepath" >&2 + fi + fi + done < "$INDEX_FILE" + fi +} + +# Function to normalise file path +normalise_path() { + local path="$1" + # Convert to relative path from current directory if possible + if [[ "$path" == "$PWD"/* ]]; then + echo "${path#$PWD/}" + else + echo "$path" + fi +} + +# Function to toggle file state in index +toggle_file_state() { + local target_file="$1" + local found=false + local new_state="" + + # Normalize the target file path + local normalized_target=$(normalise_path "$target_file") + + # Find and toggle the file + local i + for i in "${!file_paths[@]}"; do + if [[ "${file_paths[$i]}" == "$normalized_target" ]]; then + found=true + # Toggle the state + if [[ "${file_check_states[$i]}" == " " ]]; then + file_check_states[$i]="x" + new_state="x" + else + file_check_states[$i]=" " + new_state=" " + fi + echo "[$new_state] ${file_paths[$i]}" + break + fi + done + + if [[ "$found" == false ]]; then + echo "Error: File '$target_file' not found in index" >&2 + exit 1 + fi + + # Write updated index back to file + { + generate_config_json + echo "" + # Create sorted output + sort_indices=() + for i in "${!file_paths[@]}"; do + sort_indices+=("${file_paths[$i]}:$i") + done + + sorted_pairs=() + while IFS= read -r line; do + sorted_pairs+=("$line") + done < <(printf '%s\n' 
"${sort_indices[@]}" | sort) + + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + idx="${pair##*:}" + echo "[${file_check_states[$idx]}] $file" + done + } > "$INDEX_FILE" +} + +# Function to check file state in index (set to [x]) +check_file_state() { + local target_file="$1" + local found=false + + # Normalize the target file path + local normalized_target=$(normalise_path "$target_file") + + # Find and set the file to checked + local i + for i in "${!file_paths[@]}"; do + if [[ "${file_paths[$i]}" == "$normalized_target" ]]; then + found=true + # Set the state to checked + file_check_states[$i]="x" + echo "[x] ${file_paths[$i]}" + break + fi + done + + if [[ "$found" == false ]]; then + echo "Error: File '$target_file' not found in index" >&2 + exit 1 + fi + + # Write updated index back to file + { + generate_config_json + echo "" + # Create sorted output + sort_indices=() + for i in "${!file_paths[@]}"; do + sort_indices+=("${file_paths[$i]}:$i") + done + + sorted_pairs=() + while IFS= read -r line; do + sorted_pairs+=("$line") + done < <(printf '%s\n' "${sort_indices[@]}" | sort) + + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + idx="${pair##*:}" + echo "[${file_check_states[$idx]}] $file" + done + } > "$INDEX_FILE" +} + +# Function to uncheck file state in index (set to [ ]) +uncheck_file_state() { + local target_file="$1" + local found=false + + # Normalize the target file path + local normalized_target=$(normalise_path "$target_file") + + # Find and set the file to unchecked + local i + for i in "${!file_paths[@]}"; do + if [[ "${file_paths[$i]}" == "$normalized_target" ]]; then + found=true + # Set the state to unchecked + file_check_states[$i]=" " + echo "[ ] ${file_paths[$i]}" + break + fi + done + + if [[ "$found" == false ]]; then + echo "Error: File '$target_file' not found in index" >&2 + exit 1 + fi + + # Write updated index back to file + { + generate_config_json + echo "" + # Create sorted output + sort_indices=() + for i in 
"${!file_paths[@]}"; do + sort_indices+=("${file_paths[$i]}:$i") + done + + sorted_pairs=() + while IFS= read -r line; do + sorted_pairs+=("$line") + done < <(printf '%s\n' "${sort_indices[@]}" | sort) + + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + idx="${pair##*:}" + echo "[${file_check_states[$idx]}] $file" + done + } > "$INDEX_FILE" +} + +# Setup output redirection +if [[ -n "$OUTPUT_FILE" ]]; then + exec 3>&1 + exec 1>"$OUTPUT_FILE" +fi + +# Check if we're in toggle mode +if [[ -n "$TOGGLE_FILE" ]]; then + TOGGLE_MODE=true + + # Toggle requires an index file + if [[ -z "$INDEX_FILE" ]]; then + echo "Error: Toggle mode requires an index file (-i option)" >&2 + exit 1 + fi + + # Read the existing index + if [[ ! -f "$INDEX_FILE" ]]; then + echo "Error: Index file '$INDEX_FILE' does not exist" >&2 + exit 1 + fi + + read_index + toggle_file_state "$TOGGLE_FILE" + exit 0 +fi + +# Check if we're in check mode +if [[ -n "$CHECK_FILE" ]]; then + CHECK_MODE=true + + # Check requires an index file + if [[ -z "$INDEX_FILE" ]]; then + echo "Error: Check mode requires an index file (-i option)" >&2 + exit 1 + fi + + # Read the existing index + if [[ ! -f "$INDEX_FILE" ]]; then + echo "Error: Index file '$INDEX_FILE' does not exist" >&2 + exit 1 + fi + + read_index + check_file_state "$CHECK_FILE" + exit 0 +fi + +# Check if we're in uncheck mode +if [[ -n "$UNCHECK_FILE" ]]; then + UNCHECK_MODE=true + + # Uncheck requires an index file + if [[ -z "$INDEX_FILE" ]]; then + echo "Error: Uncheck mode requires an index file (-i option)" >&2 + exit 1 + fi + + # Read the existing index + if [[ ! 
-f "$INDEX_FILE" ]]; then + echo "Error: Index file '$INDEX_FILE' does not exist" >&2 + exit 1 + fi + + read_index + uncheck_file_state "$UNCHECK_FILE" + exit 0 +fi + +# Read existing index if using index mode +if [[ -n "$INDEX_FILE" ]]; then + read_index +fi + +# Function to process files +process_files() { + local search_dir="$1" + local find_args=() + local current_files=() + + # Build find command arguments + find_args+=("$search_dir") + if [[ "$RECURSIVE" != true ]]; then + find_args+=(-maxdepth 1) + fi + find_args+=(-type f) + + # Add file pattern arguments + # Note: We use string comparison (=) not pattern matching (==) here because + # brace expansion doesn't occur in [[ ]] pattern contexts, and behavior + # varies between bash versions on different platforms + if [[ "$FILESPEC" = "*.{ex,exs}" ]]; then + find_args+=(\( -name "*.ex" -o -name "*.exs" \)) + else + find_args+=(-name "$FILESPEC") + fi + find_args+=(-print0) + + # Find and collect all current files + while IFS= read -r -d '' file; do + local normalised_file + normalised_file=$(normalise_path "$file") + current_files+=("$normalised_file") + + if [[ "$VERBOSE" == true && -n "$OUTPUT_FILE" ]]; then + echo "Processing: $normalised_file" >&3 + elif [[ "$VERBOSE" == true ]]; then + echo "Processing: $normalised_file" >&2 + fi + + # Determine state for this file + local state=" " + if [[ -n "$INDEX_FILE" ]]; then + state=$(get_file_state "$normalised_file") + fi + + # Update or add to file state + set_file_state "$normalised_file" "$state" + PROCESSED_COUNT=$((PROCESSED_COUNT + 1)) + done < <(find "${find_args[@]}" 2>/dev/null) + + # If using index mode, remove files that no longer exist + if [[ -n "$INDEX_FILE" ]]; then + local new_paths=() + local new_states=() + local i + + for i in "${!file_paths[@]}"; do + local indexed_file="${file_paths[$i]}" + local found=false + + for current_file in "${current_files[@]}"; do + if [[ "$indexed_file" == "$current_file" ]]; then + found=true + break + fi + done + 
+ if [[ "$found" == true ]]; then + new_paths+=("${file_paths[$i]}") + new_states+=("${file_check_states[$i]}") + else + if [[ "$VERBOSE" == true ]]; then + if [[ -n "$OUTPUT_FILE" ]]; then + echo "Removing from index (file no longer exists): $indexed_file" >&3 + else + echo "Removing from index (file no longer exists): $indexed_file" >&2 + fi + fi + fi + done + + # Update arrays + file_paths=("${new_paths[@]}") + file_check_states=("${new_states[@]}") + fi +} + +# Process files +process_files "$STARTDIR" + +# Output results +if [[ -n "$INDEX_FILE" ]]; then + # Output config JSON block first + generate_config_json + echo "" + + # Create sorted index pairs + sort_indices=() + i=0 + for i in "${!file_paths[@]}"; do + sort_indices+=("${file_paths[$i]}:$i") + done + + # Sort and output + sorted_pairs=() + while IFS= read -r line; do + sorted_pairs+=("$line") + done < <(printf '%s\n' "${sort_indices[@]}" | sort) + + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + idx="${pair##*:}" + echo "[${file_check_states[$idx]}] $file" + done + + # Write back to index file + { + generate_config_json + echo "" + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + idx="${pair##*:}" + echo "[${file_check_states[$idx]}] $file" + done + } > "$INDEX_FILE" +else + # Original behaviour - just output current files + sort_indices=() + i=0 + for i in "${!file_paths[@]}"; do + sort_indices+=("${file_paths[$i]}:$i") + done + + sorted_pairs=() + while IFS= read -r line; do + sorted_pairs+=("$line") + done < <(printf '%s\n' "${sort_indices[@]}" | sort) + + for pair in "${sorted_pairs[@]}"; do + file="${pair%:*}" + echo "[ ] $file" + done +fi + +# Show summary if verbose +if [[ "$VERBOSE" == true ]]; then + if [[ -n "$OUTPUT_FILE" ]]; then + echo "Summary: Processed $PROCESSED_COUNT files" >&3 + if [[ -n "$INDEX_FILE" ]]; then + echo "Index updated: $INDEX_FILE" >&3 + fi + else + echo "Summary: Processed $PROCESSED_COUNT files" >&2 + if [[ -n "$INDEX_FILE" ]]; then + echo "Index 
updated: $INDEX_FILE" >&2 + fi + fi +fi + +# Restore stdout if redirected +if [[ -n "$OUTPUT_FILE" ]]; then + exec 1>&3 + exec 3>&- +fi diff --git a/bin/intent_help b/bin/intent_help new file mode 100755 index 0000000..213b9ae --- /dev/null +++ b/bin/intent_help @@ -0,0 +1,135 @@ +#!/bin/bash +# intent_help - Display help for Intent commands +# Usage: intent help [command] + +# Exit on error +set -e + +# Source common library (but don't load project config - help is a global command) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INTENT_HOME="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Source helpers for version +if [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" +fi + +# Get version +INTENT_VERSION="$(get_intent_version 2>/dev/null || echo "2.2.1")" + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Check if INTENT_HOME is set +if [ -z "$INTENT_HOME" ]; then + error "INTENT_HOME environment variable is not set" +fi + +# Display command-specific help +if [ $# -eq 1 ]; then + COMMAND="$1" + HELP_FILE="$INTENT_HOME/lib/help/$COMMAND.help.md" + + if [ -f "$HELP_FILE" ]; then + # Display help file + cat "$HELP_FILE" + else + # Check if command exists but doesn't have help + COMMAND_SCRIPT="$INTENT_HOME/bin/intent_$COMMAND" + if [ -f "$COMMAND_SCRIPT" ]; then + echo "No help available for command '$COMMAND'" + echo "" + echo "Usage information may be available by running:" + echo " intent $COMMAND --help" + else + error "Unknown command '$COMMAND'" + fi + fi + exit 0 +fi + +# Display general help +cat << EOF +Intent v$INTENT_VERSION - Structured Development Process + +A system for structured development and documentation with LLM collaboration. +Formerly known as STP (Steel Thread Process). 
+ +Usage: intent <command> [options] [arguments] + +Core: + info Display Intent status and project information + init Initialize a new Intent project + st Manage steel threads + bl Backlog.md integration + agents Manage Claude Code sub-agents + doctor Check and fix configuration + upgrade Upgrade from STP to Intent v2.1.0 + help Display help information + +Configuration: + bootstrap Initial Intent setup (first-time users) + config Display configuration settings + +Utility: + task Create and manage tasks linked to steel threads + status Show project and steel thread status + migrate Migrate tasks from steel threads to Backlog.md + llm Display LLM-specific guidance + organize Organize steel threads by status + +Other: +EOF + +# Find all intent_* commands in bin directory +for script in "$INTENT_HOME"/bin/intent_*; do + if [ -f "$script" ] && [ -x "$script" ]; then + # Extract command name from script name (remove intent_ prefix) + cmd_name=$(basename "$script" | sed 's/^intent_//') + + # Skip certain internal scripts and those manually listed above + case "$cmd_name" in + config|helpers|main|agents|info|init|st|bl|doctor|upgrade|help|bootstrap|task|status|migrate|llm|organize) continue ;; + esac + + # Construct help file path + help_file="$INTENT_HOME/lib/help/$cmd_name.help.md" + + # Get short description from help file if it exists + if [ -f "$help_file" ]; then + # Extract text between @short: and the next section (@) + short_desc=$(awk '/^@short:/{flag=1; next} /^@/{if(flag){flag=0}} flag' "$help_file" | \ + awk '{$1=$1}1' | \ + tr '\n' ' ' | \ + sed 's/^ *//; s/ *$//') + printf " %-16s %s\n" "$cmd_name" "$short_desc" + else + # Try to get description from script header + desc=$(head -n 10 "$script" | grep -E "^# intent_$cmd_name -" | sed "s/^# intent_$cmd_name - //") + if [ -n "$desc" ]; then + printf " %-16s %s\n" "$cmd_name" "$desc" + else + printf " %-16s %s\n" "$cmd_name" "(no description available)" + fi + fi + fi +done + +cat << EOF + +For more information 
on a specific command, run: + intent help <command> + +Quick Start: + 1. First-time setup: intent bootstrap + 2. New project: intent init "Project Name" + 3. New steel thread: intent st new "Title" + 4. Upgrade from STP: intent upgrade + +For complete documentation, see: + $INTENT_HOME/docs/user_guide.md + $INTENT_HOME/docs/reference_guide.md +EOF \ No newline at end of file diff --git a/bin/intent_helpers b/bin/intent_helpers new file mode 100755 index 0000000..ef8595d --- /dev/null +++ b/bin/intent_helpers @@ -0,0 +1,988 @@ +#!/bin/bash +# intent_helpers - Shared functions for Intent migration and utilities + +# Get Intent version from VERSION file +get_intent_version() { + local version_file="${INTENT_HOME:-$(dirname "${BASH_SOURCE[0]}")/..}/VERSION" + + if [ -f "$version_file" ]; then + cat "$version_file" + else + # Fallback version if VERSION file doesn't exist + echo "2.2.1" + fi +} + +# Export for use by other scripts +export -f get_intent_version + +# Convert YAML frontmatter to JSON format +convert_yaml_frontmatter() { + local file=$1 + local temp_file="${file}.tmp" + + # Check if file has YAML frontmatter + if ! 
head -1 "$file" | grep -q "^---$"; then + # No frontmatter, just copy the file to .tmp + cp "$file" "$temp_file" + return 0 + fi + + # Extract frontmatter and content + local in_frontmatter=false + local frontmatter="" + local content="" + local line_num=0 + + while IFS= read -r line; do + ((line_num++)) + if [ $line_num -eq 1 ] && [ "$line" = "---" ]; then + in_frontmatter=true + elif [ "$in_frontmatter" = true ] && [ "$line" = "---" ]; then + in_frontmatter=false + elif [ "$in_frontmatter" = true ]; then + frontmatter="${frontmatter}${line} +" + else + content="${content}${line} +" + fi + done < "$file" + + # Convert common fields + echo "---" > "$temp_file" + + # Extract and convert fields using more flexible parsing + while IFS= read -r line; do + if echo "$line" | grep -q "^stp_version:"; then + # Convert stp_version to intent_version + local version=$(echo "$line" | sed 's/stp_version:[[:space:]]*//') + echo "intent_version: 2.0.0" >> "$temp_file" + elif echo "$line" | grep -qE "^(verblock|status|created|completed|author):"; then + # Keep these fields as-is + echo "$line" >> "$temp_file" + fi + done <<< "$frontmatter" + + echo "---" >> "$temp_file" + printf "%b" "$content" >> "$temp_file" + + # Replace original file + mv "$temp_file" "$file" +} + +# Update stp_version to intent_version in frontmatter +update_version_in_frontmatter() { + local file=$1 + + if [ ! 
-f "$file" ]; then + return 1 + fi + + # Use sed to replace stp_version with intent_version + if grep -q "^stp_version:" "$file"; then + sed -i.bak 's/^stp_version:/intent_version:/' "$file" + rm -f "${file}.bak" + fi +} + +# Convert YAML config to JSON config +convert_yaml_config_to_json() { + local yaml_file=$1 + local json_file=$2 + + # For .stp-config files, we need custom parsing + if [ "$(basename "$yaml_file")" = ".stp-config" ]; then + { + echo "{" + + # Parse common fields from .stp-config + local first=true + while IFS= read -r line; do + # Skip comments and empty lines + if echo "$line" | grep -qE "^#|^$"; then + continue + fi + + # Extract key-value pairs + if echo "$line" | grep -q ":"; then + local key=$(echo "$line" | cut -d: -f1 | tr -d ' ') + local value=$(echo "$line" | cut -d: -f2- | sed 's/^ *//' | tr -d '"') + + # Map old keys to new ones + case "$key" in + project_name) + [ "$first" = false ] && echo "," + echo -n " \"project\": \"$value\"" + first=false + ;; + author) + [ "$first" = false ] && echo "," + echo -n " \"author\": \"$value\"" + first=false + ;; + stp_dir|st_dir) + # Skip these, we use standard names + ;; + editor) + [ "$first" = false ] && echo "," + echo -n " \"editor\": \"$value\"" + first=false + ;; + default_status) + # Skip, not used in v2.0.0 + ;; + esac + fi + done < "$yaml_file" + + # Add standard v2.0.0 fields + [ "$first" = false ] && echo "," + echo " \"intent_version\": \"2.0.0\"," + echo " \"intent_dir\": \"intent\"," + echo " \"backlog_dir\": \"backlog\"" + + echo "}" + } > "$json_file" + else + # For other YAML files, try to use yq if available + if command -v yq >/dev/null 2>&1; then + yq -o json "$yaml_file" > "$json_file" + else + # Fallback: create minimal JSON + echo "{" > "$json_file" + echo " \"intent_version\": \"2.0.0\"," >> "$json_file" + echo " \"intent_dir\": \"intent\"," >> "$json_file" + echo " \"backlog_dir\": \"backlog\"" >> "$json_file" + echo "}" >> "$json_file" + fi + fi +} + +# Create standard 
v2.0.0+ directory structure +create_v2_directory_structure() { + local project_root=$1 + + # Create directories + mkdir -p "$project_root/.intent" + mkdir -p "$project_root/intent/st/COMPLETED" + mkdir -p "$project_root/intent/st/NOT-STARTED" + mkdir -p "$project_root/intent/st/CANCELLED" + mkdir -p "$project_root/intent/eng/tpd" + mkdir -p "$project_root/intent/ref" + mkdir -p "$project_root/intent/llm" + mkdir -p "$project_root/backlog" + + # Create .gitignore if it doesn't exist + if [ ! -f "$project_root/.gitignore" ]; then + cat > "$project_root/.gitignore" << 'EOF' +# Intent/STP specific +.intent/local.json +*.bak +.backup_* + +# Backlog.md +backlog/.backlog-md/ + +# OS specific +.DS_Store +Thumbs.db + +# Editor specific +*.swp +*.swo +*~ +.vscode/ +.idea/ +EOF + fi +} + +# Flatten nested directory structure +flatten_directory_structure() { + local old_base=$1 # e.g., stp/prj/st + local new_base=$2 # e.g., intent/st + + if [ ! -d "$old_base" ]; then + return 0 + fi + + # Create new base directory + mkdir -p "$new_base" + + # Move all contents + if [ -n "$(ls -A "$old_base")" ]; then + mv "$old_base"/* "$new_base/" 2>/dev/null || true + mv "$old_base"/.[!.]* "$new_base/" 2>/dev/null || true + fi +} + +# Detect current STP/Intent version +detect_project_version() { + local project_root=${1:-.} + + # Check for v2.0.0+ (Intent) + if [ -f "$project_root/.intent/config.json" ]; then + # Try intent_version first, then version field + local version=$(jq -r '.intent_version // .version // empty' "$project_root/.intent/config.json" 2>/dev/null) + if [ -n "$version" ]; then + echo "$version" + return 0 + fi + fi + + # Check for v1.2.0+ (version file) + if [ -f "$project_root/stp/.config/version" ]; then + local version=$(grep "^stp_version:" "$project_root/stp/.config/version" | sed 's/stp_version:[[:space:]]*//') + if [ -n "$version" ]; then + echo "$version" + return 0 + fi + fi + + # Check for v0.0.0 (.stp-config) + if [ -f "$project_root/.stp-config" ]; then + echo 
"0.0.0" + return 0 + fi + + # Check for directory structure patterns + if [ -d "$project_root/stp/prj/st" ]; then + # Has STP structure but no version - assume 1.0.0 + echo "1.0.0" + return 0 + fi + + # Unable to determine version + return 1 +} + +# Create backup of project +create_project_backup() { + local project_root=$1 + local backup_dir=".backup_$(date +%Y%m%d_%H%M%S)" + + echo "Creating backup in $backup_dir..." + mkdir -p "$project_root/$backup_dir" + + # Create backup manifest + { + echo "Backup created: $(date)" + echo "Original version: $(detect_project_version "$project_root" || echo "unknown")" + echo "Files backed up:" + } > "$project_root/$backup_dir/manifest.txt" + + # Backup relevant directories and files + for item in stp .stp-config .intent intent backlog; do + if [ -e "$project_root/$item" ]; then + echo "Backing up $item..." + cp -r "$project_root/$item" "$project_root/$backup_dir/" + echo " $item" >> "$project_root/$backup_dir/manifest.txt" + fi + done + + echo "$backup_dir" +} + +# Check if migration is needed +needs_migration() { + local version=$1 + + # Already at v2.0.0 + if [ "$version" = "2.0.0" ]; then + return 1 + fi + + # All other versions need migration + return 0 +} + +# Display migration summary +show_migration_summary() { + local from_version=$1 + local file_count=$2 + local backup_dir=$3 + + echo "" + echo "Migration Summary:" + echo " From version: $from_version" + echo " To version: 2.0.0" + echo " Files affected: $file_count" + echo " Backup location: $backup_dir" + echo "" +} + +# Count files that will be migrated +count_migration_files() { + local project_root=$1 + local count=0 + + # Count Markdown files + if [ -d "$project_root/stp" ]; then + count=$(find "$project_root/stp" -name "*.md" -type f | wc -l) + fi + + # Add config files + [ -f "$project_root/.stp-config" ] && ((count++)) + [ -f "$project_root/stp/.config/version" ] && ((count++)) + + echo $count +} + +# Alias for compatibility +detect_stp_version() { + 
detect_project_version "$@" +} + +# Check if needs v2 migration +needs_v2_migration() { + local version=$1 + + case "$version" in + "2.0.0"|"2.1.0"|"2.2.0"|"2.2.1") + return 1 # Already v2 + ;; + *) + return 0 # Needs migration + ;; + esac +} + +# Check if needs 2.1.0 upgrade (agent initialization) +needs_v2_1_upgrade() { + local version=$1 + + case "$version" in + "2.0.0") + return 0 # Needs agent initialization + ;; + *) + return 1 # Already upgraded or too old + ;; + esac +} + +# Check if needs 2.2.0 upgrade (fileindex command) +needs_v2_2_upgrade() { + local version=$1 + + case "$version" in + "2.0.0"|"2.1.0") + return 0 # Needs fileindex upgrade + ;; + *) + return 1 # Already upgraded or too old + ;; + esac +} + +# Migrate v0.0.0 to v2.0.0 +migrate_v0_to_v2() { + local project_root=$1 + + echo "Migrating v0.0.0 structure..." + + # Create new structure + mkdir -p "$project_root/intent/st" + mkdir -p "$project_root/intent/docs" + + # Migrate steel threads + if [ -d "$project_root/stp/prj/st" ]; then + for file in "$project_root/stp/prj/st"/ST*.md; do + if [ -f "$file" ]; then + local basename=$(basename "$file" .md) + echo " Migrating $basename..." 
+ mkdir -p "$project_root/intent/st/$basename" + convert_yaml_frontmatter "$file" + mv "$file.tmp" "$project_root/intent/st/$basename/info.md" + fi + done + fi + + # Migrate other known files + [ -f "$project_root/stp/prj/wip.md" ] && cp "$project_root/stp/prj/wip.md" "$project_root/intent/wip.md" + [ -f "$project_root/stp/eng/tpd/technical_product_design.md" ] && cp "$project_root/stp/eng/tpd/technical_product_design.md" "$project_root/intent/docs/" + + # Migrate ALL remaining content to ensure nothing is left behind + migrate_remaining_content "$project_root" + + # Create config + local project_name=$(basename "$project_root") + local author="${USER:-Unknown}" + local target_version="$(get_intent_version 2>/dev/null || echo "2.2.1")" + + cat > "$project_root/.intent/config.json" << EOF +{ + "version": "$target_version", + "project_name": "$project_name", + "author": "$author", + "created": "$(date +%Y-%m-%d)", + "st_prefix": "ST" +} +EOF +} + +# Migrate v1.2.0 to v2.0.0 +migrate_v1_2_0_to_v2() { + local project_root=$1 + + echo "Migrating v1.2.0 structure..." + + # Migrate steel threads (flat files) + if [ -d "$project_root/stp/prj/st" ]; then + for file in "$project_root/stp/prj/st"/ST*.md; do + if [ -f "$file" ]; then + local basename=$(basename "$file" .md) + echo " Migrating $basename..." 
+ mkdir -p "$project_root/intent/st/$basename" + convert_yaml_frontmatter "$file" + mv "$file.tmp" "$project_root/intent/st/$basename/info.md" + fi + done + fi + + # Migrate other known content + flatten_directory_structure "$project_root/stp/prj" "$project_root/intent" + flatten_directory_structure "$project_root/stp/eng/tpd" "$project_root/intent/docs" + flatten_directory_structure "$project_root/stp/llm" "$project_root/intent/llm" + + # Migrate ALL remaining content to ensure nothing is left behind + migrate_remaining_content "$project_root" + + # Create config from existing YAML + if [ -f "$project_root/stp/.config/config" ]; then + convert_yaml_config_to_json "$project_root/stp/.config/config" "$project_root/.intent/config.json" + else + create_default_v2_config "$project_root" + fi +} + +# Migrate v1.2.1 to v2.0.0 +migrate_v1_2_1_to_v2() { + local project_root=$1 + + echo "Migrating v1.2.1 structure..." + + # This project actually uses v1.2.0 structure with status directories + # Migrate ALL steel threads from all locations + if [ -d "$project_root/stp/prj/st" ]; then + # First, migrate ST directories at the root level + for dir in "$project_root/stp/prj/st"/ST*/; do + if [ -d "$dir" ]; then + local basename=$(basename "$dir") + echo " Migrating $basename..." + mkdir -p "$project_root/intent/st/$basename" + cp -r "$dir"/* "$project_root/intent/st/$basename/" + + # Update metadata in info.md + if [ -f "$project_root/intent/st/$basename/info.md" ]; then + convert_yaml_frontmatter "$project_root/intent/st/$basename/info.md" || true + [ -f "$project_root/intent/st/$basename/info.md.tmp" ] && mv "$project_root/intent/st/$basename/info.md.tmp" "$project_root/intent/st/$basename/info.md" + fi + fi + done + + # Then migrate from status directories (COMPLETED, NOT-STARTED, etc) + for status_dir in "$project_root/stp/prj/st"/*/; do + if [ -d "$status_dir" ] && [[ ! 
"$(basename "$status_dir")" =~ ^ST[0-9]+ ]]; then + local status_name=$(basename "$status_dir") + for st_dir in "$status_dir"/ST*/; do + if [ -d "$st_dir" ]; then + local basename=$(basename "$st_dir") + echo " Migrating $basename from $status_name..." + mkdir -p "$project_root/intent/st/$status_name/$basename" + cp -r "$st_dir"/* "$project_root/intent/st/$status_name/$basename/" + + # Update metadata in info.md + if [ -f "$project_root/intent/st/$status_name/$basename/info.md" ]; then + convert_yaml_frontmatter "$project_root/intent/st/$status_name/$basename/info.md" || true + [ -f "$project_root/intent/st/$status_name/$basename/info.md.tmp" ] && mv "$project_root/intent/st/$status_name/$basename/info.md.tmp" "$project_root/intent/st/$status_name/$basename/info.md" + fi + fi + done + fi + done + + # Copy steel_threads.md + [ -f "$project_root/stp/prj/st/steel_threads.md" ] && cp "$project_root/stp/prj/st/steel_threads.md" "$project_root/intent/st/" + fi + + # Migrate other known content + [ -f "$project_root/stp/prj/wip.md" ] && cp "$project_root/stp/prj/wip.md" "$project_root/intent/" + [ -d "$project_root/stp/eng/tpd" ] && cp -r "$project_root/stp/eng/tpd"/* "$project_root/intent/eng/tpd/" 2>/dev/null || true + [ -d "$project_root/stp/llm" ] && cp -r "$project_root/stp/llm"/* "$project_root/intent/llm/" 2>/dev/null || true + + # Migrate ALL remaining content to ensure nothing is left behind + migrate_remaining_content "$project_root" + + # Create config + if [ -f "$project_root/stp/.config/config" ]; then + convert_yaml_config_to_json "$project_root/stp/.config/config" "$project_root/.intent/config.json" + else + create_default_v2_config "$project_root" + fi +} + +# Migrate v2.0.0 to v2.1.0 (agent initialization) +migrate_v2_0_to_v2_1() { + local project_root=$1 + + echo "Upgrading v2.0.0 to v2.1.0 (agent initialization)..." 
+ + # Update .intent/config.json to v2.1.0 + if [ -f "$project_root/.intent/config.json" ]; then + echo " Updating project version to 2.1.0..." + local temp_file=$(mktemp) + jq '.intent_version = "2.1.0" | .version = "2.1.0"' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi + + # Initialize user agent manifest if needed + local user_manifest_dir="$HOME/.intent/agents" + if [ ! -d "$user_manifest_dir" ]; then + echo " Initializing user agent configuration..." + mkdir -p "$user_manifest_dir" + cat > "$user_manifest_dir/installed-agents.json" << 'EOF' +{ + "version": "1.0.0", + "installed": [] +} +EOF + fi + + echo " Agent system initialized successfully" +} + +# Migrate v2.1.0 to v2.2.0 (fileindex command) +migrate_v2_1_to_v2_2() { + local project_root=$1 + + echo "Upgrading v2.1.0 to v2.2.0 (fileindex command)..." + + # Update .intent/config.json to v2.2.0 + if [ -f "$project_root/.intent/config.json" ]; then + echo " Updating project version to 2.2.0..." 
+ local temp_file=$(mktemp) + jq '.intent_version = "2.2.0" | .version = "2.2.0"' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi + + echo " Fileindex command now available" +} + +# Check if needs 2.2.1 upgrade +needs_v2_2_1_upgrade() { + local version=$1 + + case "$version" in + "2.2.0") + return 0 # Needs 2.2.1 upgrade + ;; + *) + return 1 # Already upgraded or different version + ;; + esac +} + +# Check if project needs v2.3.0 upgrade +needs_v2_3_0_upgrade() { + local version=$1 + + case "$version" in + "2.0.0"|"2.1.0"|"2.2.0"|"2.2.1") + return 0 # Needs 2.3.0 upgrade + ;; + *) + return 1 # Already upgraded or different version + ;; + esac +} + +# Check if project needs v2.3.1 upgrade +needs_v2_3_1_upgrade() { + local version=$1 + + case "$version" in + "2.0.0"|"2.1.0"|"2.2.0"|"2.2.1"|"2.3.0") + return 0 # Needs 2.3.1 upgrade + ;; + *) + return 1 # Already upgraded or different version + ;; + esac +} + +# Check if project needs v2.3.2 upgrade +needs_v2_3_2_upgrade() { + local version=$1 + + case "$version" in + "2.0.0"|"2.1.0"|"2.2.0"|"2.2.1"|"2.3.0"|"2.3.1") + return 0 # Needs 2.3.2 upgrade + ;; + *) + return 1 # Already upgraded or different version + ;; + esac +} + +# Migrate v2.2.0 to v2.2.1 +migrate_v2_2_to_v2_2_1() { + local project_root=$1 + + local target_version="$(get_intent_version 2>/dev/null || echo "2.2.1")" + echo "Upgrading v2.2.0 to v$target_version..." + + # Update .intent/config.json to target version + if [ -f "$project_root/.intent/config.json" ]; then + echo " Updating project version to $target_version..." 
+ local temp_file=$(mktemp) + jq --arg v "$target_version" '.intent_version = $v | .version = $v' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi + + echo " Version updated to $target_version" +} + +# Generate basic AGENTS.md as fallback +_generate_basic_agents_md() { + local project_root=$1 + cat > "$project_root/intent/llm/AGENTS.md" << 'EOF' +# AGENTS.md + +## Project Overview +This is an Intent project. See CLAUDE.md for project-specific guidelines. + +## Development Environment +### Prerequisites +- Bash 4.0 or higher +- POSIX-compliant shell environment + +## Build and Test Commands +### Testing +```bash +# Check for test scripts in your project +intent doctor +``` + +## Code Style Guidelines +- Follow existing patterns in the codebase +- See CLAUDE.md for project-specific guidelines + +## Intent-Specific Information +### Steel Thread Process +Work is organized into steel threads (ST####) under intent/st/ + +### Available Commands +- `intent st list` - List all steel threads +- `intent agents sync` - Update this AGENTS.md +- `intent claude subagents list` - List Claude subagents +EOF + echo " Generated basic AGENTS.md template" +} + +# Migrate v2.2.x to v2.3.0 - Plugin architecture and AGENTS.md support +migrate_v2_2_to_v2_3_0() { + local project_root=$1 + + local target_version="$(get_intent_version 2>/dev/null || echo "2.3.0")" + echo "Upgrading to Intent v$target_version with plugin architecture..." + + # 1. Create plugin directory structure + echo " Creating plugin architecture..." + mkdir -p "$project_root/intent/plugins/claude/subagents" + mkdir -p "$project_root/intent/plugins/agents/templates" + + # 2. Migrate agents to plugin location + if [ -d "$project_root/agents" ]; then + echo " Migrating agents to plugin location..." 
+ if [ "$(ls -A "$project_root/agents" 2>/dev/null)" ]; then + cp -r "$project_root/agents/"* "$project_root/intent/plugins/claude/subagents/" 2>/dev/null || true + fi + # Keep old agents directory for now, will be cleaned up later + fi + + # 3. Create AGENTS.md if it doesn't exist + if [ ! -f "$project_root/AGENTS.md" ] && [ ! -f "$project_root/intent/llm/AGENTS.md" ]; then + echo " Generating AGENTS.md..." + mkdir -p "$project_root/intent/llm" + + # Try to use the smart generator via intent command + if [ -f "$INTENT_HOME/bin/intent" ] && [ -f "$INTENT_HOME/intent/plugins/agents/bin/intent_agents" ]; then + # Call the generator directly via intent command + if (cd "$project_root" && "$INTENT_HOME/bin/intent" agents generate > "intent/llm/AGENTS.md" 2>/dev/null); then + if [ -s "$project_root/intent/llm/AGENTS.md" ]; then + echo " Generated project-specific AGENTS.md" + else + # Fallback if generation produced empty file + _generate_basic_agents_md "$project_root" + fi + else + # Fallback if command failed + _generate_basic_agents_md "$project_root" + fi + else + # Fallback to basic template if Intent not properly installed + _generate_basic_agents_md "$project_root" + fi + + # Create symlink at project root + ln -sf "intent/llm/AGENTS.md" "$project_root/AGENTS.md" + echo " Created AGENTS.md with symlink at project root" + elif [ -f "$project_root/AGENTS.md" ] || [ -f "$project_root/intent/llm/AGENTS.md" ]; then + echo " AGENTS.md already exists, skipping generation" + fi + + # 4. Update .intent/config.json + if [ -f "$project_root/.intent/config.json" ]; then + echo " Updating configuration for v$target_version..." + local temp_file=$(mktemp) + jq --arg v "$target_version" '.intent_version = $v | .version = $v | .plugins = {"claude": {"subagents_path": "intent/plugins/claude/subagents"}, "agents": {"config_path": "intent/plugins/agents"}}' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi + + # 5. 
Update CLAUDE.md with new commands + if [ -f "$project_root/CLAUDE.md" ]; then + echo " Updating CLAUDE.md with v$target_version commands..." + # Update agent commands to claude subagents + sed -i.bak 's/`intent agents init`/`intent claude subagents init`/g' "$project_root/CLAUDE.md" + sed -i.bak 's/`intent agents list`/`intent claude subagents list`/g' "$project_root/CLAUDE.md" + sed -i.bak 's/`intent agents install/`intent claude subagents install/g' "$project_root/CLAUDE.md" + rm -f "$project_root/CLAUDE.md.bak" + fi + + echo " Migration to v$target_version complete!" + echo "" + echo " IMPORTANT CHANGES IN v2.3.0:" + echo " - 'intent agents' commands are now 'intent claude subagents'" + echo " - New 'intent agents' commands manage AGENTS.md" + echo " - Subagents moved to intent/plugins/claude/subagents/" + echo " - AGENTS.md provides universal AI agent instructions" +} + +# Migrate v2.3.0 to v2.3.1 - Worker-bee agent integration +migrate_v2_3_0_to_v2_3_1() { + local project_root=$1 + + local target_version="$(get_intent_version 2>/dev/null || echo "2.3.2")" + echo "Upgrading to Intent v$target_version with worker-bee agent..." + + # Update .intent/config.json to target version + if [ -f "$project_root/.intent/config.json" ]; then + echo " Updating project version to $target_version..." + local temp_file=$(mktemp) + jq --arg v "$target_version" '.intent_version = $v | .version = $v' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi + + # Check if worker-bee agent needs to be added to manifest + if [ -f "$project_root/intent/plugins/claude/subagents/.manifest/global-agents.json" ]; then + if ! grep -q "worker-bee" "$project_root/intent/plugins/claude/subagents/.manifest/global-agents.json"; then + echo " Adding worker-bee agent to global manifest..." 
+ # This would normally be done by the Intent installation, but we document it + echo " Note: Run 'intent claude subagents list' to see worker-bee agent" + fi + fi + + # Update CLAUDE.md if it exists and doesn't mention worker-bee + if [ -f "$project_root/CLAUDE.md" ]; then + if ! grep -q "worker-bee" "$project_root/CLAUDE.md"; then + echo " Note: CLAUDE.md may need updating to include worker-bee agent" + echo " Run 'intent doctor' to check for any issues" + fi + fi + + echo " Migration to v$target_version complete!" + echo "" + echo " IMPORTANT CHANGES IN v2.3.1:" + echo " - Added worker-bee agent for Worker-Bee Driven Design (WDD)" + echo " - Worker-bee agent includes resources directory with Mix tasks and templates" + echo " - Run 'intent claude subagents install worker-bee' to install the agent" +} + +# Update config version helper function +update_config_version() { + local project_root=$1 + local target_version=$2 + + if [ -f "$project_root/.intent/config.json" ]; then + local temp_file=$(mktemp) + jq --arg v "$target_version" '.intent_version = $v | .version = $v' "$project_root/.intent/config.json" > "$temp_file" + mv "$temp_file" "$project_root/.intent/config.json" + fi +} + +# Migrate v2.3.1 to v2.3.2 - Enhanced Elixir subagent with antipattern detection +migrate_v2_3_1_to_v2_3_2() { + local project_root=$1 + + local target_version="$(get_intent_version 2>/dev/null || echo "2.3.2")" + echo "Upgrading to Intent v$target_version with enhanced Elixir subagent..." + + # Update config version + echo " Updating config version..." 
+ update_config_version "$project_root" "$target_version" + + # The Elixir subagent enhancements are handled by intent claude subagents sync + echo " Elixir subagent enhancements:" + echo " - Added comprehensive antipattern detection (24 patterns)" + echo " - Antipatterns categorized: Code, Design, Process, Meta-programming" + echo " - Full documentation at intent/plugins/claude/subagents/elixir/antipatterns.md" + + # Check if elixir subagent is installed + if [ -f "$project_root/.claude/agents/elixir.md" ]; then + echo " Elixir subagent detected. Run 'intent claude subagents sync' to update." + fi + + echo " Migration to v$target_version complete!" + echo "" + echo " IMPORTANT CHANGES IN v2.3.2:" + echo " - Enhanced Elixir subagent with antipattern detection" + echo " - Detects and remediates 24 common Elixir antipatterns" + echo " - Antipatterns sourced from official Elixir documentation" + echo " - Run 'intent claude subagents sync' to update installed agents" +} + +# Migrate all remaining content from stp/ to intent/ +migrate_remaining_content() { + local project_root=$1 + + echo " Migrating any additional content..." 
+ + # Migrate remaining content from stp/* (top level) + if [ -d "$project_root/stp" ]; then + for item in "$project_root/stp/"*; do + if [ -e "$item" ]; then + local basename=$(basename "$item") + # Skip already handled directories and config + if [[ "$basename" != "prj" && "$basename" != ".config" && "$basename" != "eng" && "$basename" != "llm" && "$basename" != "usr" ]]; then + echo " Migrating additional: stp/$basename" + cp -r "$item" "$project_root/intent/" 2>/dev/null || true + fi + fi + done + fi + + # Migrate remaining content from stp/prj/* (except st/ and wip.md) + if [ -d "$project_root/stp/prj" ]; then + for item in "$project_root/stp/prj/"*; do + if [ -e "$item" ]; then + local basename=$(basename "$item") + if [[ "$basename" != "st" && "$basename" != "wip.md" ]]; then + echo " Migrating additional: stp/prj/$basename" + cp -r "$item" "$project_root/intent/" 2>/dev/null || true + fi + fi + done + fi + + # Migrate remaining content from stp/eng/* (except tpd/) + if [ -d "$project_root/stp/eng" ]; then + for item in "$project_root/stp/eng/"*; do + if [ -e "$item" ]; then + local basename=$(basename "$item") + if [[ "$basename" != "tpd" ]]; then + echo " Migrating additional: stp/eng/$basename" + mkdir -p "$project_root/intent/eng" + cp -r "$item" "$project_root/intent/eng/" 2>/dev/null || true + fi + fi + done + fi + + # Migrate remaining content from stp/usr/* + if [ -d "$project_root/stp/usr" ]; then + echo " Migrating stp/usr/ content" + mkdir -p "$project_root/intent/usr" + cp -r "$project_root/stp/usr/"* "$project_root/intent/usr/" 2>/dev/null || true + fi + + # Update file references in all migrated files + echo " Updating file references..." 
+ find "$project_root/intent" -type f -name "*.md" -o -name "*.txt" | while read -r file; do + # Update stp/ references to intent/ + sed -i.bak 's|stp/prj/st/|intent/st/|g' "$file" 2>/dev/null || true + sed -i.bak 's|stp/prj/|intent/|g' "$file" 2>/dev/null || true + sed -i.bak 's|stp/eng/|intent/eng/|g' "$file" 2>/dev/null || true + sed -i.bak 's|stp/usr/|intent/usr/|g' "$file" 2>/dev/null || true + sed -i.bak 's|stp/llm/|intent/llm/|g' "$file" 2>/dev/null || true + # Clean up backup files + rm -f "${file}.bak" 2>/dev/null || true + done +} + +# Create default v2 config +create_default_v2_config() { + local project_root=$1 + local project_name=$(basename "$project_root") + local author="${USER:-Unknown}" + local target_version="$(get_intent_version 2>/dev/null || echo "2.2.1")" + + cat > "$project_root/.intent/config.json" << EOF +{ + "version": "$target_version", + "project_name": "$project_name", + "author": "$author", + "created": "$(date +%Y-%m-%d)", + "st_prefix": "ST" +} +EOF +} + +# Create CLAUDE.md for Intent v2.0.0+ +create_claude_md() { + local project_root=$1 + local force_overwrite=${2:-false} + local project_name=$(jq -r '.project_name // "Project"' "$project_root/.intent/config.json" 2>/dev/null || echo "Project") + local author=$(jq -r '.author // "Unknown"' "$project_root/.intent/config.json" 2>/dev/null || echo "${USER:-Unknown}") + local version=$(jq -r '.intent_version // "2.0.0"' "$project_root/.intent/config.json" 2>/dev/null || echo "2.0.0") + + # Check if CLAUDE.md already exists + if [ -f "$project_root/CLAUDE.md" ] && [ "$force_overwrite" != "true" ]; then + return 0 # File exists and we're not forcing overwrite + fi + + cat > "$project_root/CLAUDE.md" << EOF +# $project_name Project Guidelines + +This is an Intent v$version project (formerly STP). 
+ +## Project Structure + +- \`intent/\` - Project artifacts (steel threads, docs, work tracking) + - \`st/\` - Steel threads organized as directories + - \`docs/\` - Technical documentation + - \`llm/\` - LLM-specific guidelines +- \`backlog/\` - Task management (if using Backlog.md) +- \`.intent/\` - Configuration and metadata + +## Steel Threads + +Steel threads are organized as directories under \`intent/st/\`: +- Each steel thread has its own directory (e.g., ST0001/) +- Minimum required file is \`info.md\` with metadata +- Optional files: design.md, impl.md, tasks.md + +## Commands + +- \`intent st new "Title"\` - Create a new steel thread +- \`intent st list\` - List all steel threads +- \`intent st show <id>\` - Show steel thread details +- \`intent agents init\` - Initialize agent configuration +- \`intent agents list\` - List available agents +- \`intent agents install <agent>\` - Install an agent +- \`intent doctor\` - Check configuration +- \`intent help\` - Get help + +## Migration Notes + +This project was migrated from STP to Intent v$version on $(date +%Y-%m-%d). +- Old structure: \`stp/prj/st/\`, \`stp/eng/\`, etc. +- New structure: \`intent/st/\`, \`intent/docs/\`, etc. 
+- Configuration moved from YAML to JSON format + +## Author + +$author +EOF +} \ No newline at end of file diff --git a/bin/intent_info b/bin/intent_info new file mode 100755 index 0000000..672c2f6 --- /dev/null +++ b/bin/intent_info @@ -0,0 +1,119 @@ +#!/bin/bash +# intent_info - Display Intent installation and project information +# Usage: intent info + +# Exit on error +set -e + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Function to count files in a directory +count_files() { + local dir="$1" + local pattern="${2:-*}" + if [ -d "$dir" ]; then + find "$dir" -name "$pattern" -type f 2>/dev/null | wc -l | tr -d ' ' + else + echo "0" + fi +} + +# Function to count directories +count_dirs() { + local dir="$1" + if [ -d "$dir" ]; then + find "$dir" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l | tr -d ' ' + else + echo "0" + fi +} + +# Display title +echo "Intent: The Steel Thread Process" +echo + +# Display Intent installation info +echo "Installation:" +echo " INTENT_HOME: ${INTENT_HOME:-<not set>}" +# Source helpers for version if available +if [ -n "$INTENT_HOME" ] && [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" + VERSION_DISPLAY="$(get_intent_version 2>/dev/null || echo "2.2.1")" +else + VERSION_DISPLAY="${INTENT_VERSION:-2.2.1}" +fi +echo " Version: $VERSION_DISPLAY" +echo " Executable: $(which intent 2>/dev/null || echo '<not in PATH>')" +echo + +# Try to load config to find project +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +if [ -f "$SCRIPT_DIR/intent_config" ]; then + source "$SCRIPT_DIR/intent_config" + + # Try to find project root without failing + PROJECT_ROOT=$(find_project_root 2>/dev/null || echo "") +fi + +# Display project info if in a project +if [ -n "$PROJECT_ROOT" ]; then + echo "Project:" + echo " Location: $PROJECT_ROOT" + + # Try to load project config + if [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + project_name=$(jq -r 
'.project_name // "Unknown"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null || echo "Unknown") + author=$(jq -r '.author // "Unknown"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null || echo "Unknown") + created=$(jq -r '.created_date // "Unknown"' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null || echo "Unknown") + + echo " Name: $project_name" + echo " Author: $author" + echo " Created: $created" + fi + + # Count steel threads + echo + echo "Steel Threads:" + if [ -d "$PROJECT_ROOT/intent/st" ]; then + # Count by status + completed=$(count_dirs "$PROJECT_ROOT/intent/st/COMPLETED") + not_started=$(count_dirs "$PROJECT_ROOT/intent/st/NOT-STARTED") + cancelled=$(count_dirs "$PROJECT_ROOT/intent/st/CANCELLED") + in_progress=$(find "$PROJECT_ROOT/intent/st" -mindepth 1 -maxdepth 1 -type d -name "ST*" 2>/dev/null | wc -l | tr -d ' ') + + total=$((completed + not_started + cancelled + in_progress)) + + echo " Total: $total" + echo " In Progress: $in_progress" + echo " Completed: $completed" + echo " Not Started: $not_started" + echo " Cancelled: $cancelled" + else + echo " No steel threads found" + fi + + # Check for Backlog integration + if [ -f "$PROJECT_ROOT/backlog/Backlog.md" ]; then + echo + echo "Backlog:" + echo " Status: Active" + if command -v backlog &> /dev/null; then + # Try to get task count + task_count=$(cd "$PROJECT_ROOT" && backlog stat --format json 2>/dev/null | jq -r '.totalTasks // 0' 2>/dev/null || echo "Unknown") + echo " Tasks: $task_count" + fi + fi + +else + echo "Project:" + echo " Not in an Intent project directory" + echo + echo "To create a new project: intent init" + echo "To see available commands: intent help" +fi + +echo \ No newline at end of file diff --git a/bin/intent_init b/bin/intent_init new file mode 100755 index 0000000..c0c776c --- /dev/null +++ b/bin/intent_init @@ -0,0 +1,292 @@ +#!/bin/bash +# intent_init - Initialize Intent v2.0.0 project +# Usage: intent init [project_name] + +# Exit on error +set -e + +# Source common 
library (but don't load project config - init creates new projects) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Source helpers for version +if [ -z "$INTENT_HOME" ]; then + INTENT_HOME="$(cd "$SCRIPT_DIR/.." && pwd)" +fi +if [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" +fi + +# Get version from centralized source +INTENT_VERSION="$(get_intent_version 2>/dev/null || echo "2.2.1")" + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Function to display usage information +usage() { + echo "Usage: intent init [project_name]" + echo "" + echo "Initialize a new Intent v$INTENT_VERSION project in the current directory" + echo "" + echo "Arguments:" + echo " project_name Name of the project (optional, defaults to directory name)" + echo "" + echo "Example:" + echo " intent init \"My Project\"" + exit 1 +} + +# Parse options +while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) + usage + ;; + -*) + error "Unknown option: $1" + ;; + *) + # First non-option argument is project name + break + ;; + esac +done + +# Get project name (default to current directory name) +PROJECT_NAME="${1:-$(basename "$(pwd)")}" + +# Check if already initialized +if [ -f ".intent/config.json" ]; then + error "This directory is already an Intent project" +fi + +# Get author information +AUTHOR="${INTENT_AUTHOR:-${USER:-Unknown}}" +DATE="$(date '+%Y-%m-%d')" + +echo "Initializing Intent v2.0.0 project: $PROJECT_NAME" + +# Create directory structure +echo "Creating directory structure..." +mkdir -p .intent +mkdir -p intent/{st,docs,llm} +mkdir -p backlog + +# Create local configuration +echo "Creating configuration..." 
+cat > .intent/config.json << EOF +{ + "version": "2.1.0", + "project_name": "$PROJECT_NAME", + "author": "$AUTHOR", + "created": "$DATE", + "st_prefix": "ST" +} +EOF + +# Create version file +cat > .intent/version << EOF +2.0.0 +EOF + +# Create initial files from templates +echo "Creating initial files..." + +# Template directory +TEMPLATE_DIR="${INTENT_HOME}/lib/templates" + +# Create work in progress file +if [ -f "$TEMPLATE_DIR/prj/_wip.md" ]; then + sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ + -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ + -e "s/\[\[DATE\]\]/$DATE/g" \ + "$TEMPLATE_DIR/prj/_wip.md" > "intent/wip.md" +else + # Create a basic wip.md if template not found + cat > "intent/wip.md" << EOF +--- +verblock: "$DATE:v0.1: $AUTHOR - Initial version" +--- +# Work In Progress - $PROJECT_NAME + +## Current Focus + +[Document your current work here] +EOF +fi + +# Create initial documentation +cat > "intent/docs/technical_product_design.md" << EOF +--- +verblock: "$DATE:v0.1: $AUTHOR - Initial version" +--- +# Technical Product Design - $PROJECT_NAME + +## Preamble to Claude + +[Add project context and instructions for AI assistants here] + +## Overview + +[Project overview] + +## Architecture + +[System architecture] + +## Implementation + +[Implementation details] +EOF + +# Helper function for basic AGENTS.md +_create_basic_agents_md() { + cat > "intent/llm/AGENTS.md" << EOF +# AGENTS.md + +## Project Overview +$PROJECT_NAME - Created by $AUTHOR on $DATE + +## Development Environment +### Prerequisites +- [Add your project requirements] + +## Build and Test Commands +### Testing +\`\`\`bash +# Add your test commands +\`\`\` + +## Code Style Guidelines +- Follow existing patterns in the codebase +- See CLAUDE.md for Intent-specific guidelines + +## Intent-Specific Information +See CLAUDE.md for Intent project structure and commands. +EOF +} + +# Create AGENTS.md using smart generator +echo "Creating AGENTS.md..." 
+# Try to use the smart generator if Intent is installed +if [ -n "$INTENT_HOME" ] && [ -f "$INTENT_HOME/intent/plugins/agents/bin/intent_agents" ]; then + # Call the generator directly + PROJECT_ROOT="$(pwd)" "$INTENT_HOME/bin/intent" agents generate > "intent/llm/AGENTS.md" 2>/dev/null + if [ ! -s "intent/llm/AGENTS.md" ]; then + # Fallback to basic template if generation failed + _create_basic_agents_md + fi +else + # Fallback to basic template + _create_basic_agents_md +fi + +# Create symlink at project root +ln -sf "intent/llm/AGENTS.md" "AGENTS.md" + +# Create CLAUDE.md +cat > "CLAUDE.md" << EOF +# $PROJECT_NAME Project Guidelines + +This is an Intent v2.0.0 project. + +## Project Structure + +- \`intent/\` - Project artifacts (steel threads, docs, work tracking) +- \`backlog/\` - Task management (if using Backlog.md) +- \`.intent/\` - Configuration and metadata + +## Steel Threads + +Steel threads are organized as directories under \`intent/st/\`: +- Each steel thread has its own directory (e.g., ST0001/) +- Minimum required file is \`info.md\` with metadata +- Optional files: design.md, impl.md, tasks.md + +## Commands + +- \`intent st new "Title"\` - Create a new steel thread +- \`intent st list\` - List all steel threads +- \`intent st show <id>\` - Show steel thread details +- \`intent doctor\` - Check configuration +- \`intent help\` - Get help + +## Author + +$AUTHOR +EOF + +# Initialize git if not already a repository +if [ ! -d .git ]; then + echo "Initializing git repository..." + git init + + # Create .gitignore + cat > .gitignore << EOF +# Intent configuration +.intent/cache/ + +# OS files +.DS_Store +Thumbs.db + +# Editor files +*.swp +*~ +.vscode/ +.idea/ +EOF +fi + +# Initialize Backlog.md if available +if command -v backlog >/dev/null 2>&1; then + echo "Initializing Backlog.md..." 
+ # Pass project name to backlog init to avoid prompt + echo "$PROJECT_NAME" | backlog init >/dev/null 2>&1 || true + + # Configure backlog for Intent + if [ -f "backlog/.config" ]; then + echo "task_prefix=ST" >> backlog/.config + echo "default_status=todo" >> backlog/.config + fi +fi + +# Check for Claude Code and offer agent installation (only in interactive mode) +if [ -t 0 ] && [ -t 1 ]; then # Check if both stdin and stdout are TTY + if [ -d "$HOME/.claude" ] || command -v claude >/dev/null 2>&1; then + echo "" + echo "Claude Code detected!" + echo -n "Would you like to install the Intent sub-agent for better Claude integration? [Y/n] " + read -r response + + if [[ "$response" =~ ^[Yy]?$ ]]; then + echo "Installing Intent agent..." + if "$SCRIPT_DIR/intent_agents" install intent --force >/dev/null 2>&1; then + echo "Intent agent installed successfully!" + echo "Claude will now understand Intent's steel thread methodology." + else + echo "Note: Could not install agent automatically." + echo "You can install it later with: intent agents install intent" + fi + else + echo "You can install agents later with: intent agents list" + fi + fi +fi + +echo "" +echo "Intent project initialized successfully!" +echo "" +echo "Project: $PROJECT_NAME" +echo "Author: $AUTHOR" +echo "Version: 2.0.0" +echo "" +echo "Next steps:" +echo " 1. Create your first steel thread: intent st new \"Initial Setup\"" +echo " 2. Update intent/wip.md with your current focus" +echo " 3. 
Review CLAUDE.md for project guidelines" +echo "" +echo "For help, run: intent help" \ No newline at end of file diff --git a/stp/bin/stp_llm b/bin/intent_llm similarity index 78% rename from stp/bin/stp_llm rename to bin/intent_llm index d57f046..ada4f8c 100755 --- a/stp/bin/stp_llm +++ b/bin/intent_llm @@ -1,6 +1,6 @@ #!/bin/bash -# stp_llm - LLM-related commands for STP -# Usage: stp_llm <subcommand> [options] +# intent_llm - Manage LLM prompts and context files +# Usage: intent_llm <subcommand> [options] # Exit on error set -e @@ -14,29 +14,29 @@ error() { # Function to display usage usage() { cat << EOF -Usage: stp llm <subcommand> [options] +Usage: intent llm <subcommand> [options] LLM-related commands for working with AI assistants. Subcommands: - usage_rules Display the STP usage rules for LLMs + usage_rules Display the Intent usage rules for LLMs Options for usage_rules: --symlink [dir] Create a symlink to usage-rules.md in current or specified directory Examples: - stp llm usage_rules # Display usage patterns and workflows - stp llm usage_rules --symlink # Create symlink in current directory - stp llm usage_rules --symlink /tmp # Create symlink in /tmp directory + intent llm usage_rules # Display usage patterns and workflows + intent llm usage_rules --symlink # Create symlink in current directory + intent llm usage_rules --symlink /tmp # Create symlink in /tmp directory For more information on a specific subcommand, run: - stp help llm + intent help llm EOF } -# Check if STP_HOME is set -if [ -z "$STP_HOME" ]; then - error "STP_HOME environment variable is not set" +# Check if INTENT_HOME is set +if [ -z "$INTENT_HOME" ]; then + error "INTENT_HOME environment variable is not set" fi # Check for at least one argument @@ -53,7 +53,7 @@ shift case "$SUBCOMMAND" in usage_rules) # Default behavior is to display the file - USAGE_RULES_FILE="$STP_HOME/stp/eng/usage-rules.md" + USAGE_RULES_FILE="$INTENT_HOME/intent/llm/usage-rules.md" # Check if usage-rules.md 
exists if [ ! -f "$USAGE_RULES_FILE" ]; then diff --git a/bin/intent_main b/bin/intent_main new file mode 100755 index 0000000..297dceb --- /dev/null +++ b/bin/intent_main @@ -0,0 +1,114 @@ +#!/bin/bash +# intent_main - Main command for Intent (backward compatibility as 'stp') +# Copyright (c) 2024 Matthew Sinclair +# Licensed under the MIT License (see LICENSE file) +# Usage: stp <command> [options] [arguments] + +# Exit on error +set -e + +# Source helpers for version +if [ -z "$INTENT_HOME" ]; then + INTENT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +fi +if [ -f "$INTENT_HOME/bin/intent_helpers" ]; then + source "$INTENT_HOME/bin/intent_helpers" +fi +INTENT_VERSION="$(get_intent_version 2>/dev/null || echo "2.2.1")" + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Determine INTENT_HOME if not set +if [ -z "$INTENT_HOME" ]; then + # First try to determine from the script location + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + if [ -d "$SCRIPT_DIR/../lib" ]; then + # We're likely in the bin directory of an Intent installation + export INTENT_HOME="$(cd "$SCRIPT_DIR/.." && pwd)" + elif [ -d "$SCRIPT_DIR/../../intent/lib" ]; then + # We're in the bin directory under intent + export INTENT_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" + else + # Try to find Intent in common locations + for dir in ~/intent ~/.intent /usr/local/intent; do + if [ -d "$dir" ]; then + export INTENT_HOME="$dir" + break + fi + done + fi + + # If still not found, error out + if [ -z "$INTENT_HOME" ]; then + error "Could not determine INTENT_HOME. Please set it manually." + fi +fi + +# Check if bin directory exists +if [ ! 
-d "$INTENT_HOME/bin" ]; then + error "Invalid INTENT_HOME: bin directory not found at $INTENT_HOME/bin" +fi + +# Set bin directory +BIN_DIR="$INTENT_HOME/bin" + +# Source config library +if [ -f "$BIN_DIR/intent_config" ]; then + source "$BIN_DIR/intent_config" + # Load configuration + load_intent_config +fi + +# Display help if no arguments provided +if [ $# -eq 0 ]; then + exec "$BIN_DIR/intent_help" +fi + +# Get the command +COMMAND="$1" +shift + +# Handle version flag +if [ "$COMMAND" = "--version" ] || [ "$COMMAND" = "-v" ] || [ "$COMMAND" = "version" ]; then + echo "Intent version $INTENT_VERSION (stp compatibility mode)" + exit 0 +fi + +# Handle help command specially +if [ "$COMMAND" = "help" ] || [ "$COMMAND" = "--help" ] || [ "$COMMAND" = "-h" ]; then + exec "$BIN_DIR/intent_help" "$@" +fi + +# Map shortened commands to full names +case "$COMMAND" in + st) + COMMAND="st" + COMMAND_SCRIPT="intent_st" + ;; + bl) + COMMAND="bl" + COMMAND_SCRIPT="intent_bl" + ;; + *) + # Default: prefix with intent_ + COMMAND_SCRIPT="intent_$COMMAND" + ;; +esac + +# Check if command script exists +if [ ! -f "$BIN_DIR/$COMMAND_SCRIPT" ]; then + error "Unknown command '$COMMAND'. Run 'stp help' for usage information." +fi + +# Check if script is executable +if [ ! 
-x "$COMMAND_SCRIPT" ]; then + echo "Warning: Making script executable: $COMMAND_SCRIPT" >&2 + chmod +x "$COMMAND_SCRIPT" +fi + +# Execute command with remaining arguments +exec "$BIN_DIR/$COMMAND_SCRIPT" "$@" \ No newline at end of file diff --git a/stp/bin/stp_migrate b/bin/intent_migrate similarity index 84% rename from stp/bin/stp_migrate rename to bin/intent_migrate index 1869429..6425e75 100755 --- a/stp/bin/stp_migrate +++ b/bin/intent_migrate @@ -1,5 +1,5 @@ #!/bin/bash -# stp_migrate - Migrate embedded tasks from steel threads to Backlog +# intent_migrate - Migrate tasks from steel threads to Backlog # Usage: stp migrate [options] <ST####> # Exit on error @@ -11,17 +11,17 @@ error() { exit 1 } -# Get STP_HOME from environment or determine from script location -if [ -z "$STP_HOME" ]; then +# Get INTENT_HOME from environment or determine from script location +if [ -z "$INTENT_HOME" ]; then SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - export STP_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" + export INTENT_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" fi # Check if backlog is installed if ! 
command -v backlog &> /dev/null; then echo "Error: Backlog.md is not installed" >&2 echo "" >&2 - cat "$STP_HOME/stp/bin/.help/backlog-install.help.md" >&2 + cat "$INTENT_HOME/bin/.help/backlog-install.help.md" >&2 exit 1 fi @@ -60,15 +60,15 @@ find_steel_thread() { local st_file="" # Check main directory - if [ -f "$STP_HOME/stp/prj/st/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/${st_id}.md" + if [ -f "$INTENT_HOME/intent/st/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/${st_id}.md" # Check subdirectories - elif [ -f "$STP_HOME/stp/prj/st/COMPLETED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/COMPLETED/${st_id}.md" - elif [ -f "$STP_HOME/stp/prj/st/NOT-STARTED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/NOT-STARTED/${st_id}.md" - elif [ -f "$STP_HOME/stp/prj/st/CANCELLED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/CANCELLED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/COMPLETED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/COMPLETED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/NOT-STARTED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/NOT-STARTED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/CANCELLED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/CANCELLED/${st_id}.md" fi echo "$st_file" @@ -122,14 +122,14 @@ create_backlog_task() { echo " Creating task: $title" - # Use stp bl wrapper to create the task - if output=$("$STP_HOME/stp/bin/stp" bl create "$st_id" "$description" 2>&1); then + # Use intent bl wrapper to create the task + if output=$("$INTENT_HOME/bin/intent" bl create "$st_id" "$description" 2>&1); then # Extract task ID from output task_id=$(echo "$output" | grep -oE "task-[0-9]+" | head -1) if [ -n "$task_id" ] && [ "$status" = "done" ]; then # Update status to Done (capital D for Backlog) - "$STP_HOME/stp/bin/stp" bl task edit "$task_id" --status Done >/dev/null 2>&1 + "$INTENT_HOME/bin/intent" bl task edit "$task_id" --status Done >/dev/null 2>&1 echo " Task created and 
marked as done: $task_id" else echo " Task created: $task_id" @@ -161,7 +161,7 @@ update_steel_thread() { BEGIN { in_tasks = 0; tasks_replaced = 0 } /^## Tasks/ { print $0 - print "Tasks are tracked in Backlog. View with: `stp task list '"$st_id"'`" + print "Tasks are tracked in Backlog. View with: `intent task list '"$st_id"'`" print "" in_tasks = 1 tasks_replaced = 1 @@ -235,7 +235,7 @@ migrate_steel_thread() { # Get all active steel threads get_active_threads() { # Get threads that are In Progress, Not Started, or On Hold - "$STP_HOME/stp/bin/stp" st list | grep -E "(In Progress|Not Started|On Hold)" | awk -F'|' '{print $1}' | grep -oE "ST[0-9]{4}" + "$INTENT_HOME/bin/intent" st list | grep -E "(In Progress|Not Started|On Hold)" | awk -F'|' '{print $1}' | grep -oE "ST[0-9]{4}" } # Main processing diff --git a/bin/intent_minimal b/bin/intent_minimal new file mode 100755 index 0000000..2178a59 --- /dev/null +++ b/bin/intent_minimal @@ -0,0 +1,83 @@ +#!/bin/bash +# intent_minimal - Minimal Intent wrapper for testing +# This is a minimal wrapper for Phase 1 testing + +# Version +VERSION="2.0.0-alpha" + +# Determine INTENT_HOME if not set +if [ -z "$INTENT_HOME" ]; then + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + export INTENT_HOME="$(dirname "$SCRIPT_DIR")" +fi + +# Source config library +if [ -f "$INTENT_HOME/bin/intent_config" ]; then + source "$INTENT_HOME/bin/intent_config" +fi + +# Function to display help +show_help() { + cat << EOF +Intent v$VERSION - The Steel Thread Process Tool + +Usage: intent <command> [options] [arguments] + +Commands: + bootstrap Initial setup for Intent + doctor Check and fix configuration issues + init Initialize a new Intent project + upgrade Upgrade existing STP project to Intent v2.0.0 + help Show this help message + version Show version information + +More commands will be available after full migration to v2.0.0. 
+ +For help on a specific command: + intent <command> --help + +Examples: + intent bootstrap # Set up Intent for first use + intent doctor # Check your configuration + intent doctor --fix # Fix configuration issues + +EOF +} + +# Handle version flag +if [ "$1" = "--version" ] || [ "$1" = "-v" ] || [ "$1" = "version" ]; then + echo "Intent version $VERSION" + echo "Migration phase: 1 (New Commands)" + exit 0 +fi + +# Handle help +if [ $# -eq 0 ] || [ "$1" = "help" ] || [ "$1" = "--help" ] || [ "$1" = "-h" ]; then + show_help + exit 0 +fi + +# Get the command +COMMAND="$1" +shift + +# Route to appropriate command +case "$COMMAND" in + bootstrap) + exec "$INTENT_HOME/bin/intent_bootstrap" "$@" + ;; + doctor) + exec "$INTENT_HOME/bin/intent_doctor" "$@" + ;; + init) + exec "$INTENT_HOME/bin/intent_init" "$@" + ;; + upgrade) + exec "$INTENT_HOME/bin/intent_upgrade" "$@" + ;; + *) + echo "Error: Unknown command '$COMMAND'" >&2 + echo "Run 'intent help' for usage information" >&2 + exit 1 + ;; +esac \ No newline at end of file diff --git a/bin/intent_organise b/bin/intent_organise new file mode 100755 index 0000000..5ae82c7 --- /dev/null +++ b/bin/intent_organise @@ -0,0 +1,210 @@ +#!/bin/bash +# intent_organise - Organize steel threads by status +# Usage: intent organise [--dry-run] + +# Exit on error +set -e + +# Source common libraries +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/intent_config" +source "$SCRIPT_DIR/intent_helpers" + +# Load configuration +load_intent_config + +# Function to display usage information +usage() { + cat << EOF +Usage: intent organise [--dry-run] + +Organize steel threads into status directories based on their metadata + +Options: + --dry-run Preview changes without making them + -h, --help Show this help message + +This command will: +1. Read the status from each steel thread's info.md +2. Create status directories (COMPLETED, NOT-STARTED, CANCELLED, etc.) +3. 
Move steel threads to appropriate directories +4. Keep active/in-progress threads at the root level + +Example: + intent organise # Organize all steel threads + intent organise --dry-run # Preview what would be moved +EOF + exit 0 +} + +# Parse options +DRY_RUN=false + +while [[ $# -gt 0 ]]; do + case "$1" in + --dry-run) + DRY_RUN=true + shift + ;; + -h|--help) + usage + ;; + *) + error "Unknown option: $1" + ;; + esac +done + +# Check if we're in an Intent project +if [ ! -d "$INTENT_DIR/st" ]; then + error "No $INTENT_DIR/st directory found. Are you in an Intent project?" +fi + +# Function to extract status from info.md +get_st_status() { + local info_file=$1 + local status="" + + if [ -f "$info_file" ]; then + # Try to extract status from YAML frontmatter (handle malformed YAML) + # The YAML might be on one line with \n separators + status=$(sed -n '2p' "$info_file" | grep -o 'status:[^\\]*' | sed 's/status:[[:space:]]*//' | sed 's/[[:space:]]*$//') + + # Normalize status (convert to lowercase) + status_lower=$(echo "$status" | tr '[:upper:]' '[:lower:]') + case "$status_lower" in + completed|complete) + echo "COMPLETED" + ;; + not*started|"not started"|planned) + echo "NOT-STARTED" + ;; + cancelled|canceled) + echo "CANCELLED" + ;; + *progress*|active|started) + echo "ACTIVE" + ;; + "") + # If no status in YAML, try to find it in markdown + if grep -qi "Status.*Completed" "$info_file"; then + echo "COMPLETED" + elif grep -qi "Status.*Not.Started" "$info_file"; then + echo "NOT-STARTED" + elif grep -qi "Status.*Cancelled" "$info_file"; then + echo "CANCELLED" + elif grep -qi "Status.*Progress" "$info_file"; then + echo "ACTIVE" + else + echo "UNKNOWN" + fi + ;; + *) + echo "ACTIVE" + ;; + esac + else + echo "UNKNOWN" + fi +} + +# Create status directories +status_dirs=("COMPLETED" "NOT-STARTED" "CANCELLED") +for dir in "${status_dirs[@]}"; do + if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN] Would create directory: $INTENT_DIR/st/$dir" + else + mkdir -p 
"$INTENT_DIR/st/$dir" + fi +done + +# Process all steel threads +moved_count=0 +kept_count=0 + +# First, process STs at root level +for st_dir in "$INTENT_DIR/st"/ST*/; do + if [ -d "$st_dir" ]; then + st_name=$(basename "$st_dir") + info_file="$st_dir/info.md" + + status=$(get_st_status "$info_file") + + case "$status" in + COMPLETED|NOT-STARTED|CANCELLED) + if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN] Would move $st_name to $status/" + else + echo "Moving $st_name to $status/" + mv "$st_dir" "$INTENT_DIR/st/$status/" + fi + ((moved_count++)) + ;; + ACTIVE|UNKNOWN) + echo "Keeping $st_name at root (status: $status)" + ((kept_count++)) + ;; + esac + fi +done + +# Then, check if any STs are in wrong status directories +for status_dir in "${status_dirs[@]}"; do + if [ -d "$INTENT_DIR/st/$status_dir" ]; then + for st_dir in "$INTENT_DIR/st/$status_dir"/ST*/; do + if [ -d "$st_dir" ]; then + st_name=$(basename "$st_dir") + info_file="$st_dir/info.md" + + actual_status=$(get_st_status "$info_file") + + if [ "$actual_status" != "$status_dir" ]; then + if [ "$actual_status" = "ACTIVE" ] || [ "$actual_status" = "UNKNOWN" ]; then + # Move to root + if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN] Would move $st_name from $status_dir/ to root" + else + echo "Moving $st_name from $status_dir/ to root" + mv "$st_dir" "$INTENT_DIR/st/" + fi + else + # Move to correct status dir + if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN] Would move $st_name from $status_dir/ to $actual_status/" + else + echo "Moving $st_name from $status_dir/ to $actual_status/" + mv "$st_dir" "$INTENT_DIR/st/$actual_status/" + fi + fi + ((moved_count++)) + fi + fi + done + fi +done + +# Summary +echo "" +if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN] Would have moved $moved_count steel threads" + echo "[DRY RUN] Would have kept $kept_count steel threads at root" +else + echo "Organization complete:" + echo "- Moved $moved_count steel threads to status directories" + echo "- Kept $kept_count steel 
threads at root" + + # Show current structure + echo "" + echo "Current structure:" + for dir in "$INTENT_DIR/st"/*; do + if [ -d "$dir" ]; then + name=$(basename "$dir") + if [[ "$name" =~ ^ST[0-9]+ ]]; then + echo "- $name (active)" + else + count=$(find "$dir" -name "ST*" -type d | wc -l) + echo "- $name/ ($count steel threads)" + fi + fi + done +fi \ No newline at end of file diff --git a/stp/bin/stp_st b/bin/intent_st similarity index 52% rename from stp/bin/stp_st rename to bin/intent_st index 3e5c4c4..0b7b5e5 100755 --- a/stp/bin/stp_st +++ b/bin/intent_st @@ -1,6 +1,6 @@ #!/bin/bash -# stp_st - Manage steel threads (v1.2.1 - Directory-based structure) -# Usage: stp_st <command> [options] [arguments] +# intent_st - Manage steel threads in directory structure +# Usage: intent_st <command> [options] [arguments] # Exit on error set -e @@ -13,34 +13,54 @@ error() { # Function to display usage information usage() { - echo "Usage: stp st <command> [options] [arguments]" + echo "Usage: intent st <command> [options] [arguments]" echo "" echo "Manage steel threads for the project" echo "" echo "Commands:" echo " new <title> Create a new steel thread" + echo " start <id> Mark a steel thread as in progress" echo " done <id> Mark a steel thread as complete" - echo " list [--status <status>] [--width N] List all steel threads" + echo " list [--status <status>] [--width N] List steel threads (default: in progress only)" echo " sync [--write] [--width N] Synchronize steel_threads.md with individual ST files" echo " organize [--write] Organize ST files in directories by status" echo " show <id> [file] Show details of a specific steel thread" echo " edit <id> [file] Open a steel thread in your default editor" + echo " repair [id] [--write] Repair malformed steel thread metadata" echo "" echo "File options for show/edit commands:" echo " info - Main information file (default)" echo " design - Design decisions and approach" echo " impl - Implementation details" echo " tasks - 
Task tracking" - echo " results - Results and outcomes" echo " all - Show all files combined (show only)" echo "" + echo "" + echo "Status options for list command:" + echo " Default (no --status) Show only WIP threads" + echo " --status all Show all threads grouped by status" + echo " --status \"status1,status2\" Show specific statuses in order" + echo "" + echo "Valid status values (case-insensitive, normalised to caps):" + echo " wip, in progress → WIP Work in progress" + echo " tbc, not started → TBC To be commenced" + echo " completed, done → COMPLETED Completed work" + echo " cancelled, canceled → CANCELLED Cancelled threads" + echo " hold, on hold → HOLD On hold" + echo "" echo "Examples:" - echo " stp st new \"Implement Feature X\"" - echo " stp st done ST0001" - echo " stp st list --status \"In Progress\" --width 100" - echo " stp st show ST0001" - echo " stp st show ST0001 design" - echo " stp st edit ST0001 impl" + echo " intent st new \"Implement Feature X\"" + echo " intent st start ST0001" + echo " intent st done ST0001" + echo " intent st list # Show only in-progress threads" + echo " intent st list --status all # Show all threads grouped by status" + echo " intent st list --status \"wip,notstarted\" # Show WIP first, then not started" + echo " intent st show ST0001" + echo " intent st show ST0001 design" + echo " intent st edit ST0001 impl" + echo " intent st repair # Dry run repair on all steel threads" + echo " intent st repair --write # Actually repair all steel threads" + echo " intent st repair ST0001 --write # Repair specific steel thread" exit 1 } @@ -51,8 +71,12 @@ if [ $# -lt 1 ]; then fi # Load project configuration if available -if [ -f stp/.config/config ]; then - source stp/.config/config +if [ -f .intent/config.json ]; then + # Parse JSON config (simplified extraction) + ST_PREFIX=$(grep -oE '"st_prefix"[[:space:]]*:[[:space:]]*"[^"]+"' .intent/config.json | cut -d'"' -f4 || echo "ST") + AUTHOR=$(grep -oE 
'"author"[[:space:]]*:[[:space:]]*"[^"]+"' .intent/config.json | cut -d'"' -f4 || echo "$USER") +elif [ -f intent/.config/config ]; then + source intent/.config/config elif [ -f .stp-config ]; then # For backward compatibility source .stp-config @@ -64,6 +88,16 @@ shift # Function to check if we're using directory structure (v1.2.1+) is_directory_structure() { + # Intent v2.0.0 always uses directory structure + if [ -f .intent/config.json ]; then + return 0 # true - using directory structure + fi + # Check legacy version for backward compatibility + local intent_version=$(grep -m 1 "^intent_version:" intent/.config/version 2>/dev/null | sed "s/^intent_version: *//") + if [ -n "$intent_version" ]; then + return 0 # true - using directory structure + fi + # Check even older stp_version local stp_version=$(grep -m 1 "^stp_version:" stp/.config/version 2>/dev/null | sed "s/^stp_version: *//") if [[ "$stp_version" > "1.2.0" ]] || [[ "$stp_version" == "1.2.1" ]]; then return 0 # true - using directory structure @@ -72,27 +106,57 @@ is_directory_structure() { fi } +# Function to normalise status values +normalise_status() { + local status="$1" + local normalised="" + + # Convert to lowercase for comparison + local status_lower=$(echo "$status" | tr '[:upper:]' '[:lower:]') + + case "$status_lower" in + "wip"|"in progress"|"inprogress"|"in-progress") + normalised="WIP" + ;; + "not started"|"notstarted"|"not-started"|"tbc"|"to be commenced") + normalised="TBC" + ;; + "completed"|"complete"|"done") + normalised="COMPLETED" + ;; + "cancelled"|"canceled") + normalised="CANCELLED" + ;; + "on hold"|"onhold"|"on-hold"|"hold") + normalised="HOLD" + ;; + "all") + normalised="all" + ;; + *) + # Return original if no match + normalised="$status" + ;; + esac + + echo "$normalised" +} + # Function to determine the appropriate path for a steel thread based on its status get_st_path() { local st_id="$1" local status="$2" local file_name="${3:-info.md}" # Default to info.md - local 
base_dir="stp/prj/st" + local base_dir="intent/st" local test_env=0 # Check if we're in a test environment - if [[ "${TEST_TEMP_DIR:-}" != "" ]] || [[ "$(pwd)" == /tmp* ]] || [[ "$(pwd)" == */stp/tests/* ]]; then + if [[ "${TEST_TEMP_DIR:-}" != "" ]] || [[ "$(pwd)" == /tmp* ]] || [[ "$(pwd)" == */intent/tests/* ]]; then test_env=1 fi # For directory structure if is_directory_structure; then - # If we're in a test environment, just use the main directory - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id/$file_name" - return - fi - # If status is not provided, try to find the steel thread directory if [ -z "$status" ]; then # Check all possible locations @@ -119,58 +183,52 @@ get_st_path() { status="Not Started" fi fi - break + # Return the actual found path + echo "$location/$file_name" + return fi done fi # Return the appropriate directory based on status - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id/$file_name" - else - case "$status" in - "Completed") - echo "$base_dir/COMPLETED/$st_id/$file_name" - ;; - "Not Started") - echo "$base_dir/NOT-STARTED/$st_id/$file_name" - ;; - "Cancelled") - echo "$base_dir/CANCELLED/$st_id/$file_name" - ;; - *) - # In Progress or On Hold stay in the main directory - echo "$base_dir/$st_id/$file_name" - ;; - esac - fi + case "$status" in + "Completed") + echo "$base_dir/COMPLETED/$st_id/$file_name" + ;; + "Not Started") + echo "$base_dir/NOT-STARTED/$st_id/$file_name" + ;; + "Cancelled") + echo "$base_dir/CANCELLED/$st_id/$file_name" + ;; + *) + # In Progress or On Hold stay in the main directory + echo "$base_dir/$st_id/$file_name" + ;; + esac else # Legacy file structure - ignore file_name parameter - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id.md" - else - case "$status" in - "Completed") - echo "$base_dir/COMPLETED/$st_id.md" - ;; - "Not Started") - echo "$base_dir/NOT-STARTED/$st_id.md" - ;; - "Cancelled") - echo "$base_dir/CANCELLED/$st_id.md" - ;; - *) - echo "$base_dir/$st_id.md" - ;; - esac - fi + 
case "$status" in + "Completed") + echo "$base_dir/COMPLETED/$st_id.md" + ;; + "Not Started") + echo "$base_dir/NOT-STARTED/$st_id.md" + ;; + "Cancelled") + echo "$base_dir/CANCELLED/$st_id.md" + ;; + *) + echo "$base_dir/$st_id.md" + ;; + esac fi } # Function to get the next steel thread ID get_next_steel_thread_id() { local st_prefix="${ST_PREFIX:-ST}" - local base_dir="stp/prj/st" + local base_dir="intent/st" local next_id=1 local max_id=0 @@ -218,7 +276,7 @@ update_steel_threads_index() { local status="$3" local created="$4" local completed="$5" - local index_file="stp/prj/st/steel_threads.md" + local index_file="intent/st/steel_threads.md" # Create index file if it doesn't exist if [ ! -f "$index_file" ]; then @@ -264,7 +322,7 @@ case "$ST_COMMAND" in ST_ID=$(get_next_steel_thread_id) ST_STATUS="Not Started" DATE=$(date '+%Y-%m-%d') - AUTHOR="${STP_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" + AUTHOR="${INTENT_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" if is_directory_structure; then # Create directory structure @@ -274,11 +332,11 @@ case "$ST_COMMAND" in # Create files from templates # Try to find templates relative to script location SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - TEMPLATE_DIR="${SCRIPT_DIR}/../_templ/prj/st/ST####" + TEMPLATE_DIR="${INTENT_HOME}/lib/templates/prj/st/ST####" - # Also check in current directory (for tests) - if [ ! -d "$TEMPLATE_DIR" ] && [ -d "stp/_templ/prj/st/ST####" ]; then - TEMPLATE_DIR="stp/_templ/prj/st/ST####" + # Also check legacy location + if [ ! 
-d "$TEMPLATE_DIR" ] && [ -d "${INTENT_HOME}/stp/_templ/prj/st/ST####" ]; then + TEMPLATE_DIR="${INTENT_HOME}/stp/_templ/prj/st/ST####" fi if [ -d "$TEMPLATE_DIR" ]; then @@ -308,7 +366,7 @@ case "$ST_COMMAND" in cat > "$ST_DIR/info.md" << EOF --- verblock: "$(date '+%d %b %Y'):v0.1: $AUTHOR - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: $ST_STATUS created: $(date '+%Y%m%d') completed: @@ -341,7 +399,7 @@ EOF ST_FILE=$(get_st_path "$ST_ID" "$ST_STATUS") mkdir -p "$(dirname "$ST_FILE")" - if [ -f "stp/_templ/prj/st/_ST####.md" ]; then + if [ -f "${INTENT_HOME}/lib/templates/prj/st/_ST####.md" ]; then DATE_COMPACT=$(date '+%Y%m%d') sed -e "s/ST####/$ST_ID/g" \ @@ -350,7 +408,7 @@ EOF -e "s/YYYY-MM-DD/$DATE/g" \ -e "s/YYYYMMDD/$DATE_COMPACT/g" \ -e "s/\[Author Name\]/$AUTHOR/g" \ - "stp/_templ/prj/st/_ST####.md" > "$ST_FILE" + "${INTENT_HOME}/lib/templates/prj/st/_ST####.md" > "$ST_FILE" fi echo "Created steel thread: $ST_ID: $TITLE" @@ -444,6 +502,132 @@ EOF echo "Marked steel thread as complete: $ST_ID: $TITLE" ;; + "start") + # Check for required arguments + if [ $# -lt 1 ]; then + error "Steel thread ID is required" + usage + fi + + # Process the steel thread ID + ST_ID="$1" + + # If just a number is provided, format it as ST#### (with leading zeros) + if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then + ST_ID=$(printf "ST%04d" "$ST_ID") + # If the ID doesn't start with ST, prepend it + elif [[ ! "$ST_ID" =~ ^ST ]]; then + ST_ID="ST$ST_ID" + fi + + DATE=$(date '+%Y-%m-%d') + + if is_directory_structure; then + # Find the info.md file + ST_FILE=$(get_st_path "$ST_ID" "" "info.md") + + # Check if steel thread exists + if [ ! 
-f "$ST_FILE" ]; then + error "Steel thread not found: $ST_ID" + fi + + # Extract title and current status + TITLE=$(grep "^# $ST_ID:" "$ST_FILE" | sed "s/^# $ST_ID: //") + YAML_STATUS=$(grep -m 1 "^status:" "$ST_FILE" | sed "s/^status: *//") + BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$ST_FILE" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + + if [ -n "$YAML_STATUS" ]; then + CURRENT_STATUS="$YAML_STATUS" + elif [ -n "$BODY_STATUS" ]; then + CURRENT_STATUS="$BODY_STATUS" + else + CURRENT_STATUS="Not Started" + fi + + # Check if already in progress + if [ "$CURRENT_STATUS" = "In Progress" ]; then + echo "Steel thread is already in progress: $ST_ID: $TITLE" + exit 0 + fi + + # Update status to WIP + sed -i.bak "s/^\- \*\*Status\*\*: .*$/- **Status**: WIP/" "$ST_FILE" + sed -i.bak "s/^status: .*$/status: WIP/" "$ST_FILE" + rm -f "$ST_FILE.bak" + + # Get current directory + CURRENT_DIR=$(dirname "$ST_FILE") + + # Get the target location (main directory for WIP threads) + NEW_ST_FILE=$(get_st_path "$ST_ID" "WIP" "info.md") + NEW_DIR=$(dirname "$NEW_ST_FILE") + + # Move the entire directory if it's different (e.g., from NOT-STARTED/) + if [ "$CURRENT_DIR" != "$NEW_DIR" ]; then + mkdir -p "$(dirname "$NEW_DIR")" + mv "$CURRENT_DIR" "$NEW_DIR" + echo "Moved steel thread to: $NEW_DIR" + fi + else + # Legacy: Handle single file + ST_FILE=$(get_st_path "$ST_ID") + + if [ ! 
-f "$ST_FILE" ]; then + error "Steel thread not found: $ST_ID" + fi + + TITLE=$(grep "^# $ST_ID:" "$ST_FILE" | sed "s/^# $ST_ID: //") + YAML_STATUS=$(grep -m 1 "^status:" "$ST_FILE" | sed "s/^status: *//") + BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$ST_FILE" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + + if [ -n "$YAML_STATUS" ]; then + CURRENT_STATUS="$YAML_STATUS" + elif [ -n "$BODY_STATUS" ]; then + CURRENT_STATUS="$BODY_STATUS" + else + CURRENT_STATUS="Not Started" + fi + + # Check if already in progress + if [ "$CURRENT_STATUS" = "In Progress" ]; then + echo "Steel thread is already in progress: $ST_ID: $TITLE" + exit 0 + fi + + sed -i.bak "s/^\- \*\*Status\*\*: .*$/- **Status**: WIP/" "$ST_FILE" + sed -i.bak "s/^status: .*$/status: WIP/" "$ST_FILE" + rm -f "$ST_FILE.bak" + + NEW_ST_FILE=$(get_st_path "$ST_ID" "WIP") + + if [ "$ST_FILE" != "$NEW_ST_FILE" ]; then + mkdir -p "$(dirname "$NEW_ST_FILE")" + mv "$ST_FILE" "$NEW_ST_FILE" + echo "Moved steel thread to: $NEW_ST_FILE" + fi + fi + + # Extract created date for index update + CREATED=$(grep -m 1 "^\- \*\*Created\*\*:" "$ST_FILE" 2>/dev/null | sed 's/^\- \*\*Created\*\*: //' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + if [ -z "$CREATED" ] || [ "$CREATED" = "YYYY-MM-DD" ]; then + YAML_CREATED=$(grep -m 1 "^created:" "$ST_FILE" 2>/dev/null | sed "s/^created: *//") + if [ -n "$YAML_CREATED" ] && [ "$YAML_CREATED" != "YYYYMMDD" ]; then + if [[ "$YAML_CREATED" =~ ^[0-9]{8}$ ]]; then + CREATED="${YAML_CREATED:0:4}-${YAML_CREATED:4:2}-${YAML_CREATED:6:2}" + else + CREATED="$YAML_CREATED" + fi + else + CREATED="$DATE" + fi + fi + + # Update index + update_steel_threads_index "$ST_ID" "$TITLE" "WIP" "$CREATED" "" + + echo "Marked steel thread as in progress: $ST_ID: $TITLE" + ;; + "list") # Parse options STATUS="" @@ -466,7 +650,26 @@ EOF esac done - ST_DIR="stp/prj/st" + # Process status filter + declare -a STATUS_FILTER=() + if [ -z "$STATUS" ]; then + # Default: show 
only WIP threads (includes both WIP and In Progress) + STATUS_FILTER=("WIP") + elif [ "$STATUS" = "all" ]; then + # Special case: show all statuses + STATUS_FILTER=("all") + else + # Parse comma-separated list and normalise each status + IFS=',' read -ra STATUS_LIST <<< "$STATUS" + for status in "${STATUS_LIST[@]}"; do + # Trim whitespace + status=$(echo "$status" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + normalised=$(normalise_status "$status") + STATUS_FILTER+=("$normalised") + done + fi + + ST_DIR="intent/st" # Check if ST directory exists if [ ! -d "$ST_DIR" ]; then @@ -632,9 +835,22 @@ EOF fi fi - # Skip if the requested status doesn't match - if [ -n "$STATUS" ] && [ "$STATUS" != "$ST_STATUS" ]; then - continue + # Check if this status should be included + if [ "${STATUS_FILTER[0]}" != "all" ]; then + # Check if ST_STATUS is in the filter array + include_thread=0 + for filter_status in "${STATUS_FILTER[@]}"; do + # Normalise both sides for comparison + normalised_filter=$(normalise_status "$filter_status") + normalised_st=$(normalise_status "$ST_STATUS") + if [ "$normalised_filter" = "$normalised_st" ]; then + include_thread=1 + break + fi + done + if [ $include_thread -eq 0 ]; then + continue + fi fi st_data+=("$ID|$TITLE|$ST_STATUS|$CREATED|$COMPLETED") @@ -687,8 +903,22 @@ EOF fi fi - if [ -n "$STATUS" ] && [ "$STATUS" != "$ST_STATUS" ]; then - continue + # Check if this status should be included + if [ "${STATUS_FILTER[0]}" != "all" ]; then + # Check if ST_STATUS is in the filter array + include_thread=0 + for filter_status in "${STATUS_FILTER[@]}"; do + # Normalise both sides for comparison + normalised_filter=$(normalise_status "$filter_status") + normalised_st=$(normalise_status "$ST_STATUS") + if [ "$normalised_filter" = "$normalised_st" ]; then + include_thread=1 + break + fi + done + if [ $include_thread -eq 0 ]; then + continue + fi fi st_data+=("$ID|$TITLE|$ST_STATUS|$CREATED|$COMPLETED") @@ -696,28 +926,109 @@ EOF done fi - # Sort by ID in 
reverse order (newest first) - IFS=$'\n' sorted_data=($(sort -r <<<"${st_data[*]}")) - unset IFS - - # Process and display rows - for line in "${sorted_data[@]}"; do - ID=$(echo "$line" | cut -d'|' -f1) - TITLE=$(echo "$line" | cut -d'|' -f2) - ST_STATUS=$(echo "$line" | cut -d'|' -f3) - CREATED=$(echo "$line" | cut -d'|' -f4) - COMPLETED=$(echo "$line" | cut -d'|' -f5) - - # Truncate values if needed - ID_TRUNC=$(truncate_string "$ID" $ID_WIDTH) - TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) - STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) - CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) - COMPLETED_TRUNC=$(truncate_string "$COMPLETED" $DATE_WIDTH) - - printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ - "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" - done + # Sort and display based on status filter + if [ "${STATUS_FILTER[0]}" = "all" ]; then + # Group by status with predefined order + declare -a status_order=("WIP" "In Progress" "TBC" "Not Started" "HOLD" "On Hold" "COMPLETED" "Completed" "CANCELLED" "Cancelled") + + for group_status in "${status_order[@]}"; do + # Filter data for this status + declare -a status_group=() + for line in "${st_data[@]}"; do + line_status=$(echo "$line" | cut -d'|' -f3) + # For "all" grouping, we need exact match since status_order has both forms + if [ "$line_status" = "$group_status" ]; then + status_group+=("$line") + fi + done + + # Sort this group by ID in reverse order and display + if [ ${#status_group[@]} -gt 0 ]; then + IFS=$'\n' sorted_group=($(sort -r <<<"${status_group[*]}")) + unset IFS + + for line in "${sorted_group[@]}"; do + ID=$(echo "$line" | cut -d'|' -f1) + TITLE=$(echo "$line" | cut -d'|' -f2) + ST_STATUS=$(echo "$line" | cut -d'|' -f3) + CREATED=$(echo "$line" | cut -d'|' -f4) + COMPLETED=$(echo "$line" | cut -d'|' -f5) + + # Truncate values if needed + ID_TRUNC=$(truncate_string "$ID" 
$ID_WIDTH) + TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) + STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) + CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) + COMPLETED_TRUNC=$(truncate_string "$COMPLETED" $DATE_WIDTH) + + printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ + "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" + done + fi + done + elif [ ${#STATUS_FILTER[@]} -gt 1 ]; then + # Multiple statuses specified - display in order requested + for filter_status in "${STATUS_FILTER[@]}"; do + # Filter data for this status + declare -a status_group=() + for line in "${st_data[@]}"; do + line_status=$(echo "$line" | cut -d'|' -f3) + # Normalise both for comparison + normalised_line=$(normalise_status "$line_status") + normalised_filter=$(normalise_status "$filter_status") + if [ "$normalised_line" = "$normalised_filter" ]; then + status_group+=("$line") + fi + done + + # Sort this group by ID in reverse order and display + if [ ${#status_group[@]} -gt 0 ]; then + IFS=$'\n' sorted_group=($(sort -r <<<"${status_group[*]}")) + unset IFS + + for line in "${sorted_group[@]}"; do + ID=$(echo "$line" | cut -d'|' -f1) + TITLE=$(echo "$line" | cut -d'|' -f2) + ST_STATUS=$(echo "$line" | cut -d'|' -f3) + CREATED=$(echo "$line" | cut -d'|' -f4) + COMPLETED=$(echo "$line" | cut -d'|' -f5) + + # Truncate values if needed + ID_TRUNC=$(truncate_string "$ID" $ID_WIDTH) + TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) + STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) + CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) + COMPLETED_TRUNC=$(truncate_string "$COMPLETED" $DATE_WIDTH) + + printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ + "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" + done + fi + done + else + # Single status or default - sort by ID in reverse 
order (newest first) + IFS=$'\n' sorted_data=($(sort -r <<<"${st_data[*]}")) + unset IFS + + # Process and display rows + for line in "${sorted_data[@]}"; do + ID=$(echo "$line" | cut -d'|' -f1) + TITLE=$(echo "$line" | cut -d'|' -f2) + ST_STATUS=$(echo "$line" | cut -d'|' -f3) + CREATED=$(echo "$line" | cut -d'|' -f4) + COMPLETED=$(echo "$line" | cut -d'|' -f5) + + # Truncate values if needed + ID_TRUNC=$(truncate_string "$ID" $ID_WIDTH) + TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) + STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) + CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) + COMPLETED_TRUNC=$(truncate_string "$COMPLETED" $DATE_WIDTH) + + printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ + "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" + done + fi ;; "show") @@ -748,7 +1059,7 @@ EOF fi # Display each file with a header - for file in info.md design.md impl.md tasks.md results.md; do + for file in info.md design.md impl.md tasks.md; do if [ -f "$ST_DIR/$file" ]; then echo "=== $file ===" cat "$ST_DIR/$file" @@ -758,7 +1069,7 @@ EOF else # Show specific file case "$FILE_TYPE" in - info|design|impl|tasks|results) + info|design|impl|tasks) ST_FILE=$(get_st_path "$ST_ID" "" "$FILE_TYPE.md") ;; *) @@ -805,7 +1116,7 @@ EOF if is_directory_structure; then # Edit specific file case "$FILE_TYPE" in - info|design|impl|tasks|results) + info|design|impl|tasks) ST_FILE=$(get_st_path "$ST_ID" "" "$FILE_TYPE.md") ;; *) @@ -872,7 +1183,7 @@ EOF done # Paths - ST_DIR="stp/prj/st" + ST_DIR="intent/st" INDEX_FILE="$ST_DIR/steel_threads.md" # Basic validation @@ -910,6 +1221,251 @@ EOF fi ;; + "repair") + # Parse options + WRITE_MODE=0 + SPECIFIC_ST="" + + while [ $# -gt 0 ]; do + case "$1" in + --write) + WRITE_MODE=1 + shift + ;; + ST[0-9][0-9][0-9][0-9]) + SPECIFIC_ST="$1" + shift + ;; + [0-9][0-9][0-9][0-9]) + SPECIFIC_ST="ST$1" + shift + ;; + [0-9]+) + 
SPECIFIC_ST=$(printf "ST%04d" "$1") + shift + ;; + *) + error "Unknown option or invalid steel thread ID: $1" + ;; + esac + done + + BASE_DIR="intent/st" + + # Function to repair a single steel thread + repair_steel_thread() { + local st_dir="$1" + local info_file="$st_dir/info.md" + local st_id=$(basename "$st_dir") + local changes_made=0 + + if [ ! -f "$info_file" ]; then + echo "Warning: No info.md found for $st_id" + return + fi + + echo "Processing: $st_id" + + # Read the file content + local content=$(cat "$info_file") + + # Check if frontmatter is malformed (all on one line with \n) + if echo "$content" | grep -q '^---$' && echo "$content" | sed -n '2p' | grep -q '\\n'; then + echo " - Found malformed frontmatter" + changes_made=1 + + if [ $WRITE_MODE -eq 1 ]; then + # Extract the malformed line + local malformed_line=$(echo "$content" | sed -n '2p') + + # Split the line by \n and create proper YAML + local temp_file=$(mktemp) + echo "---" > "$temp_file" + + # Process each field + echo "$malformed_line" | sed 's/\\n/\n/g' | while IFS= read -r field; do + if [ -n "$field" ]; then + # Update stp_version to intent_version if found + if echo "$field" | grep -q "^stp_version:"; then + echo "intent_version: 2.0.0" >> "$temp_file" + else + echo "$field" >> "$temp_file" + fi + fi + done + + echo "---" >> "$temp_file" + + # Add the rest of the file (skip the first 3 lines) + echo "$content" | tail -n +4 >> "$temp_file" + + # Replace the original file + mv "$temp_file" "$info_file" + echo " Fixed malformed frontmatter" + else + echo " Would fix malformed frontmatter" + fi + fi + + # Re-read content after fixing malformed frontmatter + content=$(cat "$info_file") + + # Check for stp_version field + if echo "$content" | grep -q "^stp_version:"; then + echo " - Found legacy stp_version field" + changes_made=1 + + if [ $WRITE_MODE -eq 1 ]; then + sed -i.bak 's/^stp_version:/intent_version:/' "$info_file" + rm -f "${info_file}.bak" + echo " Updated to intent_version" + 
else + echo " Would update to intent_version" + fi + fi + + # Check for conflicting status + local yaml_status=$(grep -m 1 "^status:" "$info_file" | sed "s/^status: *//") + local body_status=$(grep -m 1 "^\- \*\*Status\*\*:" "$info_file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + + if [ -n "$yaml_status" ] && [ -n "$body_status" ] && [ "$yaml_status" != "$body_status" ]; then + echo " - Found conflicting status:" + echo " Frontmatter: $yaml_status" + echo " Body: $body_status" + changes_made=1 + + # Normalize the body status + local normalized_status="$body_status" + case "$(echo "$body_status" | tr '[:upper:]' '[:lower:]')" in + "wip"|"in progress") + normalized_status="In Progress" + ;; + "completed"|"complete"|"done") + normalized_status="Completed" + ;; + "not started"|"not-started") + normalized_status="Not Started" + ;; + "cancelled"|"canceled") + normalized_status="Cancelled" + ;; + "on hold") + normalized_status="On Hold" + ;; + esac + + if [ $WRITE_MODE -eq 1 ]; then + # Update frontmatter to match body status (body is user-visible, so prioritize it) + sed -i.bak "s/^status: .*/status: $normalized_status/" "$info_file" + rm -f "${info_file}.bak" + echo " Updated frontmatter status to: $normalized_status" + else + echo " Would update frontmatter status to: $normalized_status" + fi + fi + + # Check for missing required fields + if ! 
grep -q "^status:" "$info_file"; then + echo " - Missing status field in frontmatter" + changes_made=1 + + if [ $WRITE_MODE -eq 1 ]; then + # Try to get status from body, default to "WIP" + local status="WIP" + if [ -n "$body_status" ]; then + status="$body_status" + fi + + # Add status after intent_version + sed -i.bak '/^intent_version:/a\ +status: '$status "$info_file" + rm -f "${info_file}.bak" + echo " Added status: $status" + else + echo " Would add status field" + fi + fi + + # Check date formats + local yaml_created=$(grep -m 1 "^created:" "$info_file" | sed "s/^created: *//") + local yaml_completed=$(grep -m 1 "^completed:" "$info_file" | sed "s/^completed: *//") + + # Validate created date + if [ -n "$yaml_created" ] && ! [[ "$yaml_created" =~ ^[0-9]{8}$ ]] && [ "$yaml_created" != "YYYYMMDD" ]; then + echo " - Invalid created date format: $yaml_created" + changes_made=1 + + if [ $WRITE_MODE -eq 1 ]; then + # Try to parse and reformat the date + local new_created="" + # Try various date formats + if [[ "$yaml_created" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then + # YYYY-MM-DD format + new_created=$(echo "$yaml_created" | sed 's/-//g') + elif [[ "$yaml_created" =~ ^[0-9]{4}/[0-9]{2}/[0-9]{2}$ ]]; then + # YYYY/MM/DD format + new_created=$(echo "$yaml_created" | sed 's/\///g') + else + # Default to today's date + new_created=$(date '+%Y%m%d') + fi + sed -i.bak "s/^created: .*/created: $new_created/" "$info_file" + rm -f "${info_file}.bak" + echo " Fixed created date: $new_created" + else + echo " Would fix created date format" + fi + fi + + if [ $changes_made -eq 0 ]; then + echo " - No repairs needed" + fi + } + + # Find steel threads to repair + if [ -n "$SPECIFIC_ST" ]; then + # Repair specific steel thread + found=0 + for location in "$BASE_DIR" "$BASE_DIR/COMPLETED" "$BASE_DIR/NOT-STARTED" "$BASE_DIR/CANCELLED"; do + if [ -d "$location/$SPECIFIC_ST" ]; then + repair_steel_thread "$location/$SPECIFIC_ST" + found=1 + break + fi + done + + if [ $found -eq 0 
]; then + error "Steel thread not found: $SPECIFIC_ST" + fi + else + # Repair all steel threads + echo "Scanning all steel threads for repairs..." + echo "" + + # Find all steel thread directories + ALL_ST_DIRS=$(find "$BASE_DIR" -type d -name "ST[0-9][0-9][0-9][0-9]" | sort) + + for dir in $ALL_ST_DIRS; do + repair_steel_thread "$dir" + echo "" + done + fi + + if [ $WRITE_MODE -eq 0 ]; then + echo "" + echo "Dry run complete. Use --write to apply changes." + else + echo "" + echo "Repairs complete." + + # Run organize to move files to correct locations based on repaired status + echo "" + echo "Running organize to ensure correct file locations..." + SCRIPT_PATH="${BASH_SOURCE[0]}" + "$SCRIPT_PATH" organize --write + fi + ;; + "organize") # Parse options WRITE_MODE=0 @@ -925,17 +1481,17 @@ EOF esac done - BASE_DIR="stp/prj/st" + BASE_DIR="intent/st" # Create required directories if they don't exist mkdir -p "$BASE_DIR/COMPLETED" "$BASE_DIR/NOT-STARTED" "$BASE_DIR/CANCELLED" if is_directory_structure; then - # Find all steel thread directories in the root directory - ST_DIRS=$(find "$BASE_DIR" -maxdepth 1 -type d -name "ST[0-9][0-9][0-9][0-9]") + # Find all steel thread directories in any location + ALL_ST_DIRS=$(find "$BASE_DIR" -type d -name "ST[0-9][0-9][0-9][0-9]" | sort) # Process each steel thread directory - for dir in $ST_DIRS; do + for dir in $ALL_ST_DIRS; do # Skip if not a directory if [ ! 
-d "$dir" ]; then continue @@ -957,70 +1513,63 @@ EOF STATUS="Not Started" fi - echo "Processing directory: $dir" - echo " Directory: $ID - Status: $STATUS" + # Normalize status values to match expected format + case "$(echo "$STATUS" | tr '[:upper:]' '[:lower:]')" in + "wip"|"in progress") + STATUS="In Progress" + ;; + "completed"|"complete"|"done") + STATUS="Completed" + ;; + "not started"|"not-started") + STATUS="Not Started" + ;; + "cancelled"|"canceled") + STATUS="Cancelled" + ;; + "on hold") + STATUS="On Hold" + ;; + esac - # Get the target location for this directory - TARGET_DIR=$(dirname $(get_st_path "$ID" "$STATUS" "info.md")) + # Get current location + CURRENT_PARENT=$(dirname "$dir") - # If we're in write mode and the target location is different, move the directory - if [ $WRITE_MODE -eq 1 ] && [ "$dir" != "$TARGET_DIR" ]; then - mkdir -p "$(dirname "$TARGET_DIR")" - mv "$dir" "$TARGET_DIR" - echo "Moving $ID to $(dirname "$TARGET_DIR")" - else - if [ "$dir" != "$TARGET_DIR" ]; then - echo "Would move $ID to $(dirname "$TARGET_DIR")" - else - echo "$ID stays in main directory" - fi - fi - fi - done - - # Also check subdirectories - for subdir in "$BASE_DIR"/*; do - if [ -d "$subdir" ] && [[ "$subdir" != "$BASE_DIR/steel_threads.md" ]]; then - SUBDIR_NAME=$(basename "$subdir") - - # Find all steel thread directories in this subdirectory - SUB_ST_DIRS=$(find "$subdir" -maxdepth 1 -type d -name "ST[0-9][0-9][0-9][0-9]") + # Determine correct location based on status + case "$STATUS" in + "Completed") + CORRECT_PARENT="$BASE_DIR/COMPLETED" + ;; + "Not Started") + CORRECT_PARENT="$BASE_DIR/NOT-STARTED" + ;; + "Cancelled") + CORRECT_PARENT="$BASE_DIR/CANCELLED" + ;; + *) + # In Progress, On Hold, WIP, or any other status stay in main directory + CORRECT_PARENT="$BASE_DIR" + ;; + esac - for dir in $SUB_ST_DIRS; do - if [ ! 
-d "$dir" ]; then - continue - fi + # Check if already in correct location + if [ "$CURRENT_PARENT" != "$CORRECT_PARENT" ]; then + echo "Processing: $ID (Status: $STATUS)" + echo " Current location: $CURRENT_PARENT" + echo " Should be in: $CORRECT_PARENT" - ID=$(basename "$dir") - - if [ -f "$dir/info.md" ]; then - YAML_STATUS=$(grep -m 1 "^status:" "$dir/info.md" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$dir/info.md" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo " Processing directory in subdirectory: $dir" - echo " Directory: $ID - Status: $STATUS" - - TARGET_DIR=$(dirname $(get_st_path "$ID" "$STATUS" "info.md")) - - if [ $WRITE_MODE -eq 1 ] && [ "$dir" != "$TARGET_DIR" ]; then - mkdir -p "$(dirname "$TARGET_DIR")" - mv "$dir" "$TARGET_DIR" - echo "Moving $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_DIR")")" - else - if [ "$dir" != "$TARGET_DIR" ]; then - echo "Would move $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_DIR")")" - fi - fi + if [ $WRITE_MODE -eq 1 ]; then + mkdir -p "$CORRECT_PARENT" + mv "$dir" "$CORRECT_PARENT/" + echo " Moved $ID to $CORRECT_PARENT" + else + echo " Would move $ID to $CORRECT_PARENT" fi - done + else + echo "Already organized: $ID in $CORRECT_PARENT (Status: $STATUS)" + fi + else + echo "Warning: No info.md found for $ID" fi done else diff --git a/stp/bin/stp_status b/bin/intent_status similarity index 84% rename from stp/bin/stp_status rename to bin/intent_status index 4645a6d..8419cab 100755 --- a/stp/bin/stp_status +++ b/bin/intent_status @@ -1,6 +1,6 @@ #!/bin/bash -# stp_status - Sync steel thread status based on Backlog task completion -# Usage: stp status <command> [options] [arguments] +# intent_status - Sync steel thread status with Backlog tasks +# Usage: intent status <command> 
[options] [arguments] # Exit on error set -e @@ -11,23 +11,23 @@ error() { exit 1 } -# Get STP_HOME from environment or determine from script location -if [ -z "$STP_HOME" ]; then +# Get INTENT_HOME from environment or determine from script location +if [ -z "$INTENT_HOME" ]; then SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - export STP_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" + export INTENT_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" fi # Check if backlog is installed if ! command -v backlog &> /dev/null; then echo "Error: Backlog.md is not installed" >&2 echo "" >&2 - cat "$STP_HOME/stp/bin/.help/backlog-install.help.md" >&2 + cat "$INTENT_HOME/bin/.help/backlog-install.help.md" >&2 exit 1 fi # Function to display usage usage() { - echo "Usage: stp status <command> [options] [arguments]" + echo "Usage: intent status <command> [options] [arguments]" echo "" echo "Sync steel thread status based on Backlog task completion" echo "" @@ -40,9 +40,9 @@ usage() { echo " --dry-run Show what would be changed without updating" echo "" echo "Examples:" - echo " stp status show ST0014" - echo " stp status sync ST0014" - echo " stp status report" + echo " intent status show ST0014" + echo " intent status sync ST0014" + echo " intent status report" } # Validate steel thread ID format @@ -65,7 +65,7 @@ get_task_stats() { local draft=0 # Check each task directory - for task_file in "$STP_HOME/backlog/tasks"/task-*.md "$STP_HOME/backlog/drafts"/task-*.md; do + for task_file in "$INTENT_HOME/backlog/tasks"/task-*.md "$INTENT_HOME/backlog/drafts"/task-*.md; do if [ -f "$task_file" ]; then # Check if task belongs to this steel thread (in title field) if grep -q "^title:.*$st_id" "$task_file"; then @@ -155,7 +155,7 @@ EOF if [ "$current_status" != "$recommended_status" ]; then echo "" - echo "Status mismatch detected. Run 'stp status sync $st_id' to update." + echo "Status mismatch detected. Run 'intent status sync $st_id' to update." 
fi } @@ -250,7 +250,7 @@ EOF update_wip_status() { local st_id="$1" local new_status="$2" - local wip_file="$STP_HOME/stp/prj/wip.md" + local wip_file="$INTENT_HOME/intent/wip.md" if [ ! -f "$wip_file" ]; then return 0 @@ -268,7 +268,7 @@ generate_report() { echo "" # Get all active steel threads - local active_threads=$("$STP_HOME/stp/bin/stp" st list | grep -E "(In Progress|Not Started|On Hold)" | awk -F'|' '{print $1}' | grep -E "ST[0-9]{4}") + local active_threads=$("$INTENT_HOME/bin/intent" st list | grep -E "(In Progress|Not Started|On Hold)" | awk -F'|' '{print $1}' | grep -E "ST[0-9]{4}") if [ -z "$active_threads" ]; then echo "No active steel threads found." @@ -298,7 +298,7 @@ EOF done echo "" - echo "Run 'stp status sync <ST####>' to update any steel thread status." + echo "Run 'intent status sync <ST####>' to update any steel thread status." } # Find steel thread file @@ -307,15 +307,15 @@ find_steel_thread() { local st_file="" # Check main directory - if [ -f "$STP_HOME/stp/prj/st/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/${st_id}.md" + if [ -f "$INTENT_HOME/intent/st/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/${st_id}.md" # Check subdirectories - elif [ -f "$STP_HOME/stp/prj/st/COMPLETED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/COMPLETED/${st_id}.md" - elif [ -f "$STP_HOME/stp/prj/st/NOT-STARTED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/NOT-STARTED/${st_id}.md" - elif [ -f "$STP_HOME/stp/prj/st/CANCELLED/${st_id}.md" ]; then - st_file="$STP_HOME/stp/prj/st/CANCELLED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/COMPLETED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/COMPLETED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/NOT-STARTED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/NOT-STARTED/${st_id}.md" + elif [ -f "$INTENT_HOME/intent/st/CANCELLED/${st_id}.md" ]; then + st_file="$INTENT_HOME/intent/st/CANCELLED/${st_id}.md" fi echo "$st_file" @@ -387,6 +387,6 @@ case "$1" in exit 0 
;; *) - error "Unknown command: $1. Run 'stp status help' for usage information." + error "Unknown command: $1. Run 'intent status help' for usage information." ;; esac \ No newline at end of file diff --git a/stp/bin/stp_task b/bin/intent_task similarity index 81% rename from stp/bin/stp_task rename to bin/intent_task index a5b56dc..4b8007e 100755 --- a/stp/bin/stp_task +++ b/bin/intent_task @@ -1,6 +1,6 @@ #!/bin/bash -# stp_task - Manage Backlog tasks linked to Steel Threads -# Usage: stp task <command> [options] [arguments] +# intent_task - Manage Backlog tasks linked to steel threads +# Usage: intent task <command> [options] [arguments] # Exit on error set -e @@ -11,23 +11,23 @@ error() { exit 1 } -# Get STP_HOME from environment or determine from script location -if [ -z "$STP_HOME" ]; then +# Get INTENT_HOME from environment or determine from script location +if [ -z "$INTENT_HOME" ]; then SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - export STP_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" + export INTENT_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" fi # Check if backlog is installed if ! 
command -v backlog &> /dev/null; then echo "Error: Backlog.md is not installed" >&2 echo "" >&2 - cat "$STP_HOME/stp/bin/.help/backlog-install.help.md" >&2 + cat "$INTENT_HOME/bin/.help/backlog-install.help.md" >&2 exit 1 fi # Function to display usage usage() { - echo "Usage: stp task <command> [options] [arguments]" + echo "Usage: intent task <command> [options] [arguments]" echo "" echo "Manage Backlog tasks linked to Steel Threads" echo "" @@ -37,9 +37,9 @@ usage() { echo " sync <ST####> Sync task status with steel thread" echo "" echo "Examples:" - echo " stp task create ST0014 \"Update documentation\"" - echo " stp task list ST0014" - echo " stp task sync ST0014" + echo " intent task create ST0014 \"Update documentation\"" + echo " intent task list ST0014" + echo " intent task sync ST0014" } # Validate steel thread ID format @@ -64,7 +64,7 @@ create_task() { validate_st_id "$st_id" || exit 1 # Check if steel thread exists - if ! "$STP_HOME/stp/bin/stp" st show "$st_id" >/dev/null 2>&1; then + if ! 
"$INTENT_HOME/bin/intent" st show "$st_id" >/dev/null 2>&1; then echo "Error: Steel thread $st_id not found" >&2 exit 1 fi @@ -73,8 +73,8 @@ create_task() { local full_title="$st_id - $title" echo "Creating task: $full_title" - # Use stp bl wrapper to create the task - if "$STP_HOME/stp/bin/stp" bl create "$st_id" "$title"; then + # Use intent bl wrapper to create the task + if "$INTENT_HOME/bin/intent" bl create "$st_id" "$title"; then echo "Task created successfully" else exit 1 @@ -95,7 +95,7 @@ list_tasks() { echo "================" # Check task files directly - for task_file in "$STP_HOME/backlog/tasks"/task-*.md; do + for task_file in "$INTENT_HOME/backlog/tasks"/task-*.md; do if [ -f "$task_file" ]; then # Check if the task title contains the steel thread ID if grep -q "^title:.*$st_id" "$task_file"; then @@ -172,10 +172,10 @@ sync_status() { echo "" echo "Recommended steel thread status: $st_status" - # Note: Actual status update would be done by stp-status command + # Note: Actual status update would be done by intent-status command echo "" echo "To update steel thread status, run:" - echo " stp-status sync $st_id" + echo " intent-status sync $st_id" } # Display usage if no arguments provided @@ -203,6 +203,6 @@ case "$1" in exit 0 ;; *) - error "Unknown command: $1. Run 'stp task help' for usage information." + error "Unknown command: $1. Run 'intent task help' for usage information." ;; esac \ No newline at end of file diff --git a/bin/intent_upgrade b/bin/intent_upgrade new file mode 100755 index 0000000..2f33d9d --- /dev/null +++ b/bin/intent_upgrade @@ -0,0 +1,306 @@ +#!/bin/bash +# intent_upgrade - Upgrade any STP version to Intent +# Usage: intent upgrade [--backup-dir DIR] + +# Exit on error +set -e + +# Function to display error messages +error() { + echo "Error: $1" >&2 + exit 1 +} + +# Source helpers +INTENT_BIN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INTENT_HOME="$(cd "$INTENT_BIN/.." 
&& pwd)" +source "$INTENT_BIN/intent_helpers" + +# Get target version +TARGET_VERSION="$(get_intent_version 2>/dev/null || echo "2.3.2")" + +# Function to display usage information +usage() { + cat << EOF +Usage: intent upgrade [--backup-dir DIR] + +Upgrade any STP version project to Intent v$TARGET_VERSION + +Options: + --backup-dir DIR Custom backup directory name (default: .backup/backup-TIMESTAMP) + --no-backup Skip backup creation (dangerous!) + -h, --help Show this help message + +Examples: + intent upgrade + intent upgrade --backup-dir my-backup + +This command will: +1. Detect the current STP/Intent version +2. Create a backup of the current state +3. Migrate directory structure from stp/* to intent/* +4. Convert YAML configs to JSON format +5. Update all metadata and file formats +6. Create .intent/config.json +7. Initialize agent configuration (v2.1.0+) +8. Update CLAUDE.md with Intent v$TARGET_VERSION guidelines +EOF + exit 0 +} + +# Parse options +BACKUP_DIR="" +NO_BACKUP=false + +while [[ $# -gt 0 ]]; do + case "$1" in + --backup-dir) + BACKUP_DIR="$2" + shift 2 + ;; + --no-backup) + NO_BACKUP=true + shift + ;; + -h|--help) + usage + ;; + *) + error "Unknown option: $1" + ;; + esac +done + +# Detect current version +echo "Detecting current project version..." +VERSION=$(detect_stp_version . || echo "") + +if [ -z "$VERSION" ]; then + echo "Could not detect project version." + echo "" + echo "This may not be an STP/Intent project, or it may be using an unrecognized structure." + echo "To create a new Intent project, use: intent init" + exit 1 +fi + +# Always show version information +echo "" +echo "Current version: $VERSION" +echo "Target version: $TARGET_VERSION" +echo "" + +# Check if already at target version +if [ "$VERSION" = "$TARGET_VERSION" ]; then + echo "✓ Project is already at Intent v$TARGET_VERSION" + echo " No upgrade needed." + exit 0 +fi + +# Determine if migration is needed +if ! needs_v2_migration "$VERSION" && ! 
needs_v2_1_upgrade "$VERSION" && ! needs_v2_2_upgrade "$VERSION" && ! needs_v2_2_1_upgrade "$VERSION" && ! needs_v2_3_0_upgrade "$VERSION" && ! needs_v2_3_1_upgrade "$VERSION"; then + echo "✓ Project is already up to date at version $VERSION" + echo " No upgrade path available from $VERSION to $TARGET_VERSION" + exit 0 +fi + +echo "→ Upgrade available: $VERSION → $TARGET_VERSION" + +# Create backup unless disabled +if [ "$NO_BACKUP" != true ]; then + # Set backup directory name + if [ -z "$BACKUP_DIR" ]; then + BACKUP_NAME="backup-$(date +%Y%m%d-%H%M%S)" + else + # If user provided a backup dir, extract just the name + BACKUP_NAME=$(basename "$BACKUP_DIR") + fi + + # Always put backups under .backup directory + BACKUP_DIR=".backup/$BACKUP_NAME" + + echo "Creating backup in $BACKUP_DIR..." + mkdir -p "$BACKUP_DIR" + + # Backup relevant directories + for dir in stp .stp-config .intent intent backlog CLAUDE.md; do + if [ -e "$dir" ]; then + cp -r "$dir" "$BACKUP_DIR/" 2>/dev/null || true + fi + done + + echo "Backup created successfully" +fi + +echo "" +echo "Starting migration to Intent v$TARGET_VERSION..." +echo "" + +# Create v2.0.0 directory structure +echo "Creating Intent v2.0.0 directory structure..." +create_v2_directory_structure . + +# Migrate based on version +case "$VERSION" in + "0.0.0") + echo "Migrating from v0.0.0..." + migrate_v0_to_v2 . + ;; + "1.0.0"|"1.2.0") + echo "Migrating from v$VERSION..." + migrate_v1_2_0_to_v2 . + ;; + "1.2.1") + echo "Migrating from v1.2.1..." + migrate_v1_2_1_to_v2 . + ;; + "2.0.0") + echo "Upgrading from v2.0.0 to v$TARGET_VERSION..." + migrate_v2_0_to_v2_1 . + migrate_v2_1_to_v2_2 . + migrate_v2_2_to_v2_2_1 . + migrate_v2_2_to_v2_3_0 . + migrate_v2_3_0_to_v2_3_1 . + migrate_v2_3_1_to_v2_3_2 . + ;; + "2.1.0") + echo "Upgrading from v2.1.0 to v$TARGET_VERSION..." + migrate_v2_1_to_v2_2 . + migrate_v2_2_to_v2_2_1 . + migrate_v2_2_to_v2_3_0 . + migrate_v2_3_0_to_v2_3_1 . + migrate_v2_3_1_to_v2_3_2 . 
+ ;; + "2.2.0") + echo "Upgrading from v2.2.0 to v$TARGET_VERSION..." + migrate_v2_2_to_v2_2_1 . + migrate_v2_2_to_v2_3_0 . + migrate_v2_3_0_to_v2_3_1 . + migrate_v2_3_1_to_v2_3_2 . + ;; + "2.2.1") + echo "Upgrading from v2.2.1 to v$TARGET_VERSION..." + migrate_v2_2_to_v2_3_0 . + migrate_v2_3_0_to_v2_3_1 . + migrate_v2_3_1_to_v2_3_2 . + ;; + "2.3.0") + echo "Upgrading from v2.3.0 to v$TARGET_VERSION..." + migrate_v2_3_0_to_v2_3_1 . + migrate_v2_3_1_to_v2_3_2 . + ;; + "2.3.1") + echo "Upgrading from v2.3.1 to v$TARGET_VERSION..." + migrate_v2_3_1_to_v2_3_2 . + ;; + *) + error "Unknown version: $VERSION" + ;; +esac + +# Migrate backlog if it exists +if [ -d "backlog" ]; then + echo "Backlog directory found, preserving..." + # Update backlog config if needed + if [ -f "backlog/.config" ]; then + if ! grep -q "task_prefix=ST" "backlog/.config"; then + echo "task_prefix=ST" >> "backlog/.config" + fi + fi +fi + + +# Ensure proper upgrade path to v$TARGET_VERSION +# Only run intermediate upgrades for older versions +if [ "$VERSION" != "2.0.0" ] && [ "$VERSION" != "2.1.0" ] && [ "$VERSION" != "2.2.0" ]; then + echo "Initializing agent configuration..." + migrate_v2_0_to_v2_1 . + echo "Applying v2.2.0 upgrade..." + migrate_v2_1_to_v2_2 . + echo "Finalizing v$TARGET_VERSION upgrade..." + migrate_v2_2_to_v2_2_1 . +elif [ "$VERSION" = "2.0.0" ]; then + # 2.0.0 needs all three upgrades + echo "Applying v2.1.0 upgrade..." + migrate_v2_0_to_v2_1 . + echo "Applying v2.2.0 upgrade..." + migrate_v2_1_to_v2_2 . + echo "Finalizing v$TARGET_VERSION upgrade..." + migrate_v2_2_to_v2_2_1 . +elif [ "$VERSION" = "2.1.0" ]; then + # 2.1.0 needs 2.2.0 and 2.2.1 upgrades + echo "Applying v2.2.0 upgrade..." + migrate_v2_1_to_v2_2 . + echo "Finalizing v$TARGET_VERSION upgrade..." + migrate_v2_2_to_v2_2_1 . +fi +# 2.2.0 case is handled by the main switch statement above + +# Preserve agent manifests +if [ -d ".intent/agents" ]; then + echo "Agent manifests found, preserving..." 
+fi +if [ -d "intent/agents/.manifest" ]; then + echo "Project agent manifests found, preserving..." +fi + +# Create or update CLAUDE.md +CLAUDE_EXISTED=false +if [ -f "CLAUDE.md" ]; then + echo "Preserving existing CLAUDE.md..." + CLAUDE_EXISTED=true +else + echo "Creating CLAUDE.md..." + create_claude_md . +fi + +# Final message +echo "" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "✓ Upgrade completed successfully!" +echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" +echo "" +echo "Version upgraded: $VERSION → $TARGET_VERSION" +echo "" +echo "Summary of changes:" +if [ "$VERSION" != "2.0.0" ]; then + echo "- Directory structure migrated from stp/* to intent/*" + echo "- Configuration converted from YAML to JSON" +fi +echo "- Metadata updated to Intent v$TARGET_VERSION format" +echo "- Created/updated .intent/config.json" +echo "- Initialized agent configuration" +if [ "$CLAUDE_EXISTED" = "true" ]; then + echo "- Preserved existing CLAUDE.md" +else + echo "- Created CLAUDE.md with Intent guidelines" +fi + +if [ "$NO_BACKUP" != true ]; then + echo "" + echo "Backup location: $BACKUP_DIR" +fi + +echo "" +echo "Next steps:" +echo "1. Review the migrated content in the intent/ directory" +echo "2. Test commands with 'intent' instead of 'stp'" +echo "3. Run 'intent doctor' to verify configuration" +echo "4. Run 'intent agents list' to see available agents" +echo "5. Delete the old stp/ directory when ready" + +# Offer to remove old stp directory +echo "" +echo "The old stp/ directory is still present. You can:" +echo "- Keep it for reference" +echo "- Remove it manually with: rm -rf stp/" +echo "- Let this script remove it (type 'yes' to confirm)" +echo "" +read -p "Remove old stp/ directory now? (yes/no): " response + +if [ "$response" = "yes" ]; then + echo "Removing stp/ directory..." + rm -rf stp/ + echo "Old directory removed." +else + echo "Keeping old stp/ directory for reference." 
+fi \ No newline at end of file diff --git a/bin/stp b/bin/stp new file mode 120000 index 0000000..f3dfb0c --- /dev/null +++ b/bin/stp @@ -0,0 +1 @@ +intent \ No newline at end of file diff --git a/stp/doc/blog/0000-motivation-for-stp.md b/docs/blog/0000-motivation-for-intent.md similarity index 81% rename from stp/doc/blog/0000-motivation-for-stp.md rename to docs/blog/0000-motivation-for-intent.md index 50625ef..a786971 100644 --- a/stp/doc/blog/0000-motivation-for-stp.md +++ b/docs/blog/0000-motivation-for-intent.md @@ -1,16 +1,16 @@ --- -title: "The Motivation for STP: Why Intention Matters" +title: "The Motivation for Intent: Why Intention Matters" date: "2025-07-08" author: "Matthew Sinclair" draft: false word_count: 1507 --- -# The Motivation for STP: Why Intention Matters in LLM-Assisted Development +# The Motivation for Intent: Why Intention Matters in LLM-Assisted Development In the rapidly evolving landscape of AI-assisted software development, we stand at a critical juncture. Large Language Models (LLMs) like Claude have transformed how we write code, debug systems, and architect solutions. Yet, despite their remarkable capabilities, a fundamental challenge persists: how do we ensure that these powerful tools truly understand not just what we want to build, but why we want to build it? -This is the story of the Steel Thread Process (STP) – a response to the growing disconnect between developer intention and LLM execution. It's a framework born from the realisation that in our rush to leverage AI capabilities, we've overlooked the most crucial element of successful collaboration: shared understanding of purpose. +This is the story of the Intent (Intent) – a response to the growing disconnect between developer intention and LLM execution. It's a framework born from the realisation that in our rush to leverage AI capabilities, we've overlooked the most crucial element of successful collaboration: shared understanding of purpose. 
## The Fundamental Challenge of LLM Collaboration @@ -19,6 +19,7 @@ When you sit down with an LLM to solve a coding problem, something remarkable ha This creates what I call the "illusion of understanding." The LLM's responses are so coherent, so contextually appropriate, that we assume it grasps not just what we're asking for, but why we're asking for it. This assumption becomes dangerous as projects grow in complexity. Consider a typical interaction: + - Developer: "Create a user authentication system" - LLM: *Generates a complete auth system with login, registration, and password reset* @@ -35,6 +36,7 @@ In traditional software development, we've long recognised the importance of req With LLM-assisted development, this problem intensifies. The speed at which we can now generate code means we can travel much farther down the wrong path before realising we've lost our way. The question "what problem are we solving?" becomes not just important, but critical to project success. I've witnessed this firsthand in numerous projects: + - A caching system that improved performance but made debugging impossible - An elegant API that satisfied all technical requirements but failed to meet actual user needs - A refactoring that improved code quality while eliminating features users depended on @@ -42,6 +44,7 @@ I've witnessed this firsthand in numerous projects: The business cost of this intention-implementation misalignment is staggering. Industry studies consistently show that fixing problems in production costs 100x more than preventing them during design. When working with LLMs, this multiplier effect accelerates because we can implement misaligned solutions faster than ever before. Intention gets diluted through implementation phases in predictable ways: + 1. Initial vision → Vague requirements 2. Requirements → Technical specifications 3. 
Specifications → Implementation details @@ -55,9 +58,10 @@ You might be thinking, "Don't existing methodologies already address this?" It's Traditional approaches were designed for human-to-human communication. They assume shared context, cultural understanding, and the ability to read between the lines. LLMs, however brilliant, lack these implicit understandings. They need explicit intention to guide their pattern matching toward useful outcomes. -STP doesn't seek to replace your existing methodology. Instead, it adds a layer of intention clarity that makes any approach more effective when working with LLMs. Think of it as adding semantic markup to your development process – making the implicit explicit. +Intent doesn't seek to replace your existing methodology. Instead, it adds a layer of intention clarity that makes any approach more effective when working with LLMs. Think of it as adding semantic markup to your development process – making the implicit explicit. In practice, this means: + - **Agile stories** gain intention statements that explain why this feature matters - **Lean experiments** document not just what to measure, but what we hope to learn - **XP practices** include intention context that helps LLMs suggest appropriate solutions @@ -73,6 +77,7 @@ What's been missing from our development stack is what I call "Intent Architectu Intention forms the foundation of all technical decisions, yet we rarely make it explicit. When intention is clear, constraints become clearer too. Instead of arbitrary technical requirements, we have purpose-driven boundaries that guide both human and LLM decision-making. 
The relationship between intention clarity and implementation quality is direct and measurable: + - Clear intention → Focused solutions - Vague intention → Over-engineered or off-target implementations - No stated intention → Solutions in search of problems @@ -83,32 +88,36 @@ Building a shared mental model between humans and LLMs requires making our menta ## Intention-First Development as a Paradigm Shift -STP represents a paradigm shift in how we approach development. Instead of starting with "what to build," we start with "why we're building it." This isn't just philosophical – it fundamentally changes how we interact with LLMs and how they can assist us. +Intent represents a paradigm shift in how we approach development. Instead of starting with "what to build," we start with "why we're building it." This isn't just philosophical – it fundamentally changes how we interact with LLMs and how they can assist us. When intention leads, the developer-LLM conversation transforms: + - Before: "Build me a REST API for user management" - After: "We need to enable self-service user onboarding to reduce support load. Let's design an API that prioritises ease of use over flexibility." The second prompt doesn't just describe what to build – it provides the context needed for intelligent trade-offs. The LLM can now suggest solutions optimised for your actual goals, not just technical correctness. The return on investment in intention clarification is substantial: + - **Reduced rework**: Solutions align with goals from the start - **Better suggestions**: LLMs provide more relevant options - **Clearer evaluation**: Easy to assess if solutions meet intentions - **Knowledge preservation**: Future developers (and LLMs) understand the why Consider how intention transforms implementation approaches. 
The same requirement – "add user notifications" – leads to vastly different solutions depending on intention: + - Intention: Increase engagement → Rich, frequent notifications - Intention: Reduce cognitive load → Minimal, batched notifications - Intention: Meet compliance requirements → Audit-focused notifications Intention becomes the cornerstone of technical intuition. When we make intentions explicit, we're not just documenting for others – we're clarifying our own thinking and creating a foundation for better technical decisions. -## STP: An Answer to the Intention Question +## Intent: An Answer to the Intention Question -The Steel Thread Process emerged from a simple observation: successful LLM collaboration requires explicit intention capture, but we lacked a systematic way to achieve this. STP was designed specifically to bridge this gap. +Intent emerged from a simple observation: successful LLM collaboration requires explicit intention capture, but we lacked a systematic way to achieve this. Intent was designed specifically to bridge this gap. + +At its core, Intent introduces the concept of a "steel thread" – not just a development task or user story, but an intention container. Each steel thread captures: -At its core, STP introduces the concept of a "steel thread" – not just a development task or user story, but an intention container. Each steel thread captures: - The problem we're solving (the why) - The success criteria (how we'll know we've succeeded) - The constraints and context (the boundaries) @@ -116,7 +125,8 @@ At its core, STP introduces the concept of a "steel thread" – not just a devel This structure prioritises the "why" before the "what." By the time we get to implementation details, both humans and LLMs have a clear understanding of purpose. -STP creates intention alignment throughout development by: +Intent creates intention alignment throughout development by: + 1. **Capturing intention** at the moment of conception 2. 
**Preserving intention** through structured documentation 3. **Referencing intention** during implementation decisions @@ -124,8 +134,8 @@ STP creates intention alignment throughout development by: The difference between tool-first and intention-first methodologies is profound. Tool-first approaches ask "How can we use this LLM?" Intention-first asks "What are we trying to achieve, and how can an LLM help?" -In the following posts in this series, we'll explore how STP implements these principles, dive deep into the steel thread methodology, and see real examples of intention-driven development in action. But the journey starts here, with a simple recognition: in the age of AI-assisted development, intention isn't just important – it's essential. +In the following posts in this series, we'll explore how Intent implements these principles, dive deep into the steel thread methodology, and see real examples of intention-driven development in action. But the journey starts here, with a simple recognition: in the age of AI-assisted development, intention isn't just important – it's essential. ## Next Steps -Ready to see how STP puts these principles into practice? Continue with our next post: [Introduction to STP](./0001-introduction-to-stp.md), where we'll explore the concrete components and workflow that make intention-first development a reality. +Ready to see how Intent puts these principles into practice? Continue with our next post: [Introduction to Intent](./0001-introduction-to-intent.md), where we'll explore the concrete components and workflow that make intention-first development a reality. 
diff --git a/docs/blog/0001-introduction-to-intent.md b/docs/blog/0001-introduction-to-intent.md new file mode 100644 index 0000000..8cc0f49 --- /dev/null +++ b/docs/blog/0001-introduction-to-intent.md @@ -0,0 +1,258 @@ +--- +title: "Introduction to Intent" +date: "2025-07-08" +author: "Matthew Sinclair" +draft: false +word_count: 1623 +--- + +# Introduction to Intent: A Better Way to Build Software + +If you've ever lost track of why a piece of code exists, struggled to onboard a new team member, or watched an LLM confidently solve the wrong problem, you understand the cost of lost intention in software development. + +## See the Difference + +**Without Intent:** + +```python +# cache.py +def get_user_profile(user_id): + # Check cache first + cached = redis.get(f"user:{user_id}") + if cached: + return json.loads(cached) + + # Complex caching logic with multiple layers... + # 200 lines of sophisticated caching code +``` + +6 months later: "Why is this cache so complex? Let's simplify it!" + +**With Intent:** + +```bash +$ intent st show ST0015 +# Reveals: "Multi-layer cache because: +# - API rate limits: 100 requests/minute +# - Black Friday traffic: 10,000 requests/second +# - Customer requirement: <50ms response time" +``` + +Now you know: That "complex" cache is saving your business! + +In our [previous post](./0000-motivation-for-intent.md), we explored why capturing and preserving intention is crucial for modern development, especially when collaborating with AI. Today, I'll show you exactly how Intent solves this problem with a practical, lightweight system that enhances your existing workflow. + +## Building on the Intention Foundation + +We established that the fundamental challenge in modern development isn't just building software – it's ensuring that what we build aligns with why we're building it. This challenge intensifies when working with LLMs, which excel at pattern matching but lack understanding of underlying purpose. 
+ +Intent addresses this challenge by making intention explicit and structural. Rather than hoping developers will document the "why" or expecting LLMs to infer our goals, Intent builds intention capture into the development workflow itself. + +The shift from theoretical understanding to practical implementation happens through three key innovations: + +1. **Steel threads** that encapsulate both intent and implementation +2. **Structured templates** that prompt for intention at every stage +3. **Integration with task management** that maintains the intent-to-execution link + +## What is Intent? + +Intent is a lightweight methodology that captures the "why" behind your code. Here's how it works in practice: + +### Try This Example + +```bash +# 1. Start a new feature +$ intent st new "Add password reset functionality" +Created: ST0023 + +# 2. Capture the REAL requirements (not just "add password reset") +$ intent st edit ST0023 +``` + +In your editor, document the actual constraints: + +```markdown +## Objective +Implement secure password reset that prevents account takeover + +## Context +- Recent security audit flagged email-based reset as vulnerable +- 15% of support tickets are password-related +- Must comply with SOC2 requirements +- Cannot break existing mobile app (v2.3.x) + +## Approach +- Time-limited tokens (15 minutes) +- Rate limiting (3 attempts per hour) +- Multi-factor verification for high-value accounts +- Backward compatible API endpoints +``` + +### Now Share with Your AI Assistant + +```bash +$ intent st show ST0023 | pbcopy +# Paste into Claude/ChatGPT/Copilot +``` + +The AI immediately understands: + +- ❌ Not just "implement password reset" +- ✅ Security requirements, compliance needs, compatibility constraints +- ✅ Suggests appropriate security patterns +- ✅ Knows to maintain backward compatibility + +### The Three Components + +**Shell Scripts**: Simple automation + +- `intent st new` - Create steel threads +- `intent st show` - View 
intentions +- `intent bl` - Manage tasks + +**Markdown Templates**: Structured capture + +- Forces "why" before "what" +- Consistent format for AI parsing +- Human-readable documentation + +**Task Tracking**: Execution management + +- Break threads into concrete tasks +- Track progress visually +- Maintain thread-to-task linkage + +The magic: Your future self (and AI) always knows WHY code exists. + +## Core Principles of Intent + +Intent is built on eight core principles that guide its design and implementation: + +### 1. Documentation as a First-Class Citizen + +In Intent, documentation isn't something you do after coding – it's an integral part of the development process. Every steel thread starts with documentation that captures intention, and this documentation evolves alongside the code. When documentation drives development, both humans and LLMs have the context they need to make good decisions. + +### 2. Intent Capture Throughout the Lifecycle + +Intention isn't just captured at the beginning – it's maintained and referenced throughout development. From initial conception through implementation to future maintenance, the "why" remains visible and relevant. This creates a traceable lineage from business need to technical implementation. + +### 3. Incremental Development Through Steel Threads + +Rather than tackling entire features or epics, Intent encourages breaking work into steel threads – complete, minimal paths through your system. Each thread can be understood, implemented, and validated independently, making development more manageable and progress more visible. + +### 4. Task Tracking Linked to Steel Threads + +While steel threads capture the big picture intention, individual tasks track the detailed work. Intent's integration with Backlog.md creates a two-level system: strategic intent at the thread level, tactical execution at the task level. This separation keeps both perspectives clear and connected. + +### 5. 
Process-Agnostic Compatibility + +Intent doesn't dictate how you should develop software. Whether you're using Scrum, Kanban, or any other methodology, Intent layers intention-awareness on top. It's designed to enhance, not replace, your existing workflow. + +### 6. Lightweight Enhancement + +The entire Intent system consists of simple shell scripts and markdown templates. No complex tools to learn, no vendor lock-in, no heavyweight processes. You can adopt Intent incrementally, starting with a single steel thread and expanding as you see value. + +### 7. Flexibility to Match Your Workflow + +Every team works differently. Intent's templates and processes are starting points, not rigid requirements. Modify templates, adjust workflows, and make Intent work for your specific needs while maintaining the core principle of intention capture. + +### 8. Integration with Modern LLM Tooling + +Intent was designed in the age of AI-assisted development. Its structured approach to intention and documentation makes it particularly effective when working with LLMs, providing the context and clarity these tools need to be truly helpful. + +## The Steel Thread Concept + +While we'll dive deep into steel threads in the next post, it's worth understanding the basic concept as it's central to how Intent works. + +A steel thread is a complete, minimal path through your system that delivers value. Think of it as the thinnest possible slice that: + +- Solves a real problem +- Can be implemented independently +- Provides learning about the system +- Captures clear intention + +### Real Impact: Intent at Work + +Here's a real example from building Intent itself: + +```bash +$ intent st show ST0012 +# "Document Sync Command - sync steel thread index" + +$ intent st show ST0016 +# "Rename STP to Intent - complete v2.1.0 refactoring" +``` + +These aren't just task lists. 
Each thread contains: + +- **Why** we needed these features +- **What** constraints we faced +- **How** we approached the solution +- **Learnings** from implementation + +When working on ST0016 (the major refactoring), the AI assistant could see: + +- Why the rename was necessary +- What backward compatibility to maintain +- Which patterns to follow + +Result: The AI provided targeted, context-aware suggestions instead of generic refactoring advice. + +## Benefits of Intent + +Adopting Intent brings concrete benefits to your development process: + +### Better Alignment Between Intent and Implementation + +When every piece of code traces back to a clearly stated intention, misalignment becomes obvious and correctable. Reviews shift from "Is this good code?" to "Does this serve our purpose?" – a much more valuable question. + +### Documentation That Stays Up-to-Date + +Because documentation drives development rather than following it, it naturally stays current. The templates prompt for updates at each stage, and the documentation evolves alongside the implementation. No more archaeology to understand why code exists. + +### Fine-Grained Visibility Into Work Progress + +The two-tier system of steel threads and tasks provides both strategic and tactical visibility. Stakeholders can track high-level progress through steel threads, while developers manage day-to-day work through linked tasks. Everyone gets the view they need. + +### Automatic Status Synchronisation + +As tasks complete, steel thread status updates automatically. This isn't just convenient – it ensures that high-level tracking reflects ground truth. No more status meetings to figure out where things really stand. + +### Improved Onboarding Experience + +New team members can understand not just what the code does, but why it exists. Each steel thread tells a complete story from intention to implementation. This context dramatically reduces the time needed to become productive. 
+ +### Enhanced Collaboration with LLMs + +When working with AI assistants, the structured intention and context in Intent documentation provides exactly what LLMs need to give relevant, aligned suggestions. Instead of guessing at your goals, they can reference explicit intentions. + +### More Efficient Development Process + +While it might seem like additional overhead, Intent actually streamlines development by: + +- Reducing rework from misunderstood requirements +- Eliminating redundant status tracking +- Preventing scope creep through clear intentions +- Enabling parallel work through independent steel threads + +## What's Coming in This Blog Series + +This introduction has given you a high-level view of Intent, but there's much more to explore. Here's what's coming in the rest of this series: + +**[The Steel Thread Methodology](./0002-the-steel-thread-methodology.md)**: A deep dive into steel threads – what they are, how to create them, and why they're more effective than traditional work organisation. + +**[Intent Capture in Software Development](./0003-intent-capture-in-software-development.md)**: Practical techniques for capturing, preserving, and leveraging intention throughout your development process. + +**[LLM Collaboration with Intent](./0004-llm-collaboration-with-intent.md)**: How Intent's structure makes AI assistance more effective, with real examples of improved LLM interactions. + +**[Getting Started with Intent](./0005-getting-started-with-intent.md)**: A practical guide to implementing Intent in your project, including installation, configuration, and your first steel thread. + +**[Next Steps and Future Work](./0006-next-steps-and-future-work.md)**: Where Intent is heading and how you can contribute to its development. + +## Ready to Transform Your Development Process? + +Intent offers a pragmatic solution to the intention problem in modern software development. 
By making intention explicit and structural, it bridges the gap between why we build and what we build, creating better outcomes for both human and AI collaboration. + +In the next post, we'll explore the steel thread methodology in detail, showing you exactly how to break down work in a way that preserves intention while enabling incremental progress. + +[Continue to: The Steel Thread Methodology →](./0002-the-steel-thread-methodology.md) diff --git a/stp/doc/blog/0002-the-steel-thread-methodology.md b/docs/blog/0002-the-steel-thread-methodology.md similarity index 80% rename from stp/doc/blog/0002-the-steel-thread-methodology.md rename to docs/blog/0002-the-steel-thread-methodology.md index 47c25f8..872934f 100644 --- a/stp/doc/blog/0002-the-steel-thread-methodology.md +++ b/docs/blog/0002-the-steel-thread-methodology.md @@ -8,13 +8,13 @@ word_count: 1842 # The Steel Thread Methodology: Development with Purpose -In our [previous post](./0001-introduction-to-stp.md), we introduced STP as a lightweight system for intention-aware development. At the heart of STP lies a simple yet powerful concept: the steel thread. Today, we'll explore what steel threads are, how they work, and why they're more effective than traditional approaches to organising development work. +In our [previous post](./0001-introduction-to-intent.md), we introduced Intent as a lightweight system for intention-aware development. At the heart of Intent lies a simple yet powerful concept: the steel thread. Today, we'll explore what steel threads are, how they work, and why they're more effective than traditional approaches to organising development work. If you've ever struggled with work items that are too large to complete quickly but too small to justify extensive planning, or if you've watched documentation drift away from implementation reality, steel threads offer a practical solution. ## What is a Steel Thread? 
-The term "steel thread" comes from systems engineering, where it represents the thinnest possible slice of functionality that connects all parts of a system. In STP, a steel thread is a self-contained unit of work that: +The term "steel thread" comes from systems engineering, where it represents the thinnest possible slice of functionality that connects all parts of a system. In Intent, a steel thread is a self-contained unit of work that: 1. **Captures clear intention** - Why this work matters 2. **Delivers tangible value** - A complete, usable outcome @@ -41,7 +41,7 @@ Here's a visual comparison: Traditional: Epic → Stories → Tasks → PRs (Why gets lost along the way) -STP: Intention → Steel Thread → Tasks +Intent: Intention → Steel Thread → Tasks (Why is preserved throughout) ``` @@ -64,7 +64,7 @@ Steel threads don't replace your existing methodology – they enhance it with i Steel threads adapt to your existing structure: ``` -Your Process → STP Enhancement +Your Process → Intent Enhancement ───────────────────────────────────────── Epic → Multiple related steel threads User Story → One steel thread @@ -78,6 +78,7 @@ The key insight: steel threads don't add another layer of work items. They add i ### Process-Agnostic Functionality Containers Steel threads work because they focus on one thing: capturing and preserving intention through the development lifecycle. 
They don't dictate: + - How you estimate work - When you do planning - Who makes decisions @@ -105,23 +106,10 @@ Every steel thread follows a consistent structure that captures intention and gu ### Structure and Components -Starting with STP v1.2.1, steel threads are organized as directories containing multiple files: - -``` -ST####/ -├── info.md # Main information and metadata -├── design.md # Design decisions and approach -├── impl.md # Implementation details -├── tasks.md # Task tracking -└── results.md # Results and outcomes -``` - -The main `info.md` file contains: +A typical steel thread document contains: ```markdown --- -verblock: "DD MMM YYYY:v0.1: Author Name - Initial version" -stp_version: 1.2.1 status: Not Started|In Progress|Completed|On Hold created: YYYYMMDD completed: YYYYMMDD @@ -131,26 +119,32 @@ completed: YYYYMMDD ## Objective [Clear statement of what this thread aims to achieve] -## Context +## Context [Why this work matters and how it fits the bigger picture] -``` -The separation into multiple files allows better organization: -- **design.md**: Captures approach and architectural decisions -- **impl.md**: Documents actual implementation details -- **tasks.md**: Links to Backlog tasks or contains embedded checklist -- **results.md**: Records what was delivered and learned +## Approach +[High-level strategy for implementation] + +## Tasks +[Linked to Backlog tasks or embedded checklist] + +## Implementation Notes +[Decisions, learnings, and details that emerge during work] + +## Results +[What was actually delivered and learned] +``` ### Status Tracking and Task Integration -STP integrates with Backlog.md to provide two-tier tracking: +Intent integrates with Backlog.md to provide two-tier tracking: 1. **Thread Level**: Overall progress of the steel thread 2. 
**Task Level**: Granular work items linked to the thread ```bash # View all tasks for a steel thread -$ stp task list ST0013 +$ intent task list ST0013 Tasks for ST0013: ================ task-6 [done] Research existing docs @@ -158,7 +152,7 @@ task-7 [done] Write introduction section task-8 [in_progress] Write challenges section # Check status alignment -$ stp status show ST0013 +$ intent status show ST0013 Steel Thread: ST0013 Current Status: Not Started Task Summary: @@ -182,22 +176,18 @@ This evolution preserves the journey from intention to implementation, creating ## Managing Work with Steel Threads -The STP workflow makes steel thread management straightforward and systematic. +The Intent workflow makes steel thread management straightforward and systematic. ### Creating and Planning Steel Threads ```bash # Create a new steel thread -$ stp st new +$ intent st new Enter title: Implement user authentication Created: ST0015 -# View the created directory structure -$ ls stp/prj/st/ST0015/ -info.md design.md impl.md tasks.md results.md - -# The info.md template guides intention capture -$ cat stp/prj/st/ST0015/info.md +# The template guides intention capture +$ cat intent/prj/st/ST0015.md ``` The template prompts for objective, context, and approach – ensuring you capture intention from the start. 
@@ -208,13 +198,13 @@ Once you've defined the steel thread, create granular tasks: ```bash # Create tasks linked to the steel thread -$ stp task create ST0015 "Research authentication options" +$ intent task create ST0015 "Research authentication options" Created task task-59 -$ stp task create ST0015 "Implement login endpoint" +$ intent task create ST0015 "Implement login endpoint" Created task task-60 -$ stp task create ST0015 "Add session management" +$ intent task create ST0015 "Add session management" Created task task-61 ``` @@ -222,28 +212,20 @@ The naming convention `ST#### - Description` maintains the link between tasks an ### Tracking Progress -STP provides multiple views of your work: +Intent provides multiple views of your work: ```bash # See all tasks for a thread -$ stp task list ST0015 +$ intent task list ST0015 # Check overall project status -$ stp st list --status in_progress - -# View specific files in the steel thread -$ stp st show ST0015 # Shows info.md by default -$ stp st show ST0015 design # Shows design.md -$ stp st show ST0015 all # Shows all files - -# Edit specific files -$ stp st edit ST0015 impl # Edit implementation notes +$ intent st list --status in_progress # Verify thread status matches task completion -$ stp status show ST0015 +$ intent status show ST0015 # Synchronise status based on task completion -$ stp status sync ST0015 +$ intent status sync ST0015 Updated ST0015 status from 'Not Started' to 'In Progress' ``` @@ -258,15 +240,16 @@ Steel threads facilitate team coordination: ## Real-world Examples -Let's examine actual steel threads from the STP project itself. +Let's examine actual steel threads from the Intent project itself. ### Example 1: Feature Implementation (ST0012 - Document Sync Command) -This thread implemented the `stp st sync` command: +This thread implemented the `intent st sync` command: -**Objective**: Create a command to keep the steel_threads.md index synchronised with individual thread directories. 
+**Objective**: Create a command to keep the steel_threads.md index synchronised with individual thread files. **Why This Works Well**: + - Clear, focused objective - Solves a specific problem (manual sync was error-prone) - Delivered complete functionality @@ -281,6 +264,7 @@ This thread reorganised completed steel threads: **Objective**: Implement directory structure to separate active and completed threads. **Why This Works Well**: + - Addresses a scaling problem - Simple, achievable scope - Clear success criteria @@ -292,13 +276,14 @@ This thread reorganised completed steel threads: The very blog post you're reading came from a steel thread! -**Objective**: Create blog series explaining STP concepts and methodology. +**Objective**: Create blog series explaining Intent concepts and methodology. **Why This Works Well**: + - Breaks large effort into manageable pieces - Each blog post has clear intent - Progress easily tracked through tasks -- Meta-demonstration of STP in action +- Meta-demonstration of Intent in action ### Patterns and Anti-patterns @@ -327,8 +312,9 @@ Steel threads force clarity about what constitutes "done." 
Each thread delivers ### Improved Visibility into Project Status The two-tier system provides perfect visibility: -- **Strategic View**: `stp st list` shows high-level progress -- **Tactical View**: `stp task list` reveals detailed work status + +- **Strategic View**: `intent st list` shows high-level progress +- **Tactical View**: `intent task list` reveals detailed work status - **Automatic Sync**: Status updates based on actual task completion ### Documentation That Evolves Naturally @@ -338,6 +324,7 @@ Because documentation starts before code and grows during implementation, it sta ### Enhanced Team Collaboration Steel threads create shared understanding: + - New team members quickly grasp context - Reviews focus on intention alignment - Handoffs include complete context @@ -346,6 +333,7 @@ Steel threads create shared understanding: ### Clear Demarcation of Completion No more arguing about whether something is "done": + - Objective met? ✓ - Tasks complete? ✓ - Documentation updated? ✓ diff --git a/stp/doc/blog/0003-intent-capture-in-software-development.md b/docs/blog/0003-intent-capture-in-software-development.md similarity index 93% rename from stp/doc/blog/0003-intent-capture-in-software-development.md rename to docs/blog/0003-intent-capture-in-software-development.md index f29ed4c..8ea282f 100644 --- a/stp/doc/blog/0003-intent-capture-in-software-development.md +++ b/docs/blog/0003-intent-capture-in-software-development.md @@ -8,7 +8,7 @@ word_count: 2156 # Intent Capture in Software Development: Bridging the Gap -We've explored [why intention matters](./0000-motivation-for-stp.md) and how [steel threads](./0002-the-steel-thread-methodology.md) provide a framework for preserving it. Now we turn to the critical skill that makes it all work: intent capture. How do we extract, document, and preserve the intentions that drive great software? 
+We've explored [why intention matters](./0000-motivation-for-intent.md) and how [steel threads](./0002-the-steel-thread-methodology.md) provide a framework for preserving it. Now we turn to the critical skill that makes it all work: intent capture. How do we extract, document, and preserve the intentions that drive great software? Intent capture isn't just about writing better documentation – it's about creating a shared understanding that survives the journey from conception to production and beyond. Today, we'll explore practical techniques for capturing intent effectively, whether you're working solo, with a team, or alongside AI assistants. @@ -25,6 +25,7 @@ Let's explore how to capture that clarity before it fades. ### What is "Intent" in Software Development? Intent encompasses the full context behind technical decisions: + - **Business Goals**: What problem does this solve for users? - **Technical Rationale**: Why this approach over alternatives? - **Constraints**: What limitations shaped the solution? @@ -68,6 +69,7 @@ Maybe that "over-engineered" cache prevents database meltdowns during traffic sp ### The Telephone Game Effect In long-lived codebases, understanding degrades like a game of telephone: + - Original developer: "Cache this because the API limits us to 100 calls/minute" - Six months later: "There's some caching here for performance" - Two years later: "Not sure why this is cached, probably outdated" @@ -76,6 +78,7 @@ In long-lived codebases, understanding degrades like a game of telephone: ### Intent as Future Context Well-captured intent provides context for future decisions: + - **Refactoring**: "Can I change this safely?" - **Debugging**: "Why does it work this way?" - **Enhancement**: "Will this change align with the design?" @@ -84,11 +87,13 @@ Well-captured intent provides context for future decisions: ### The What vs. 
Why Distinction **What** (Code shows this): + ```python cache_timeout = 3600 # 1 hour ``` **Why** (Intent captures this): + ```python # Cache for 1 hour because: # - User profiles change infrequently @@ -98,17 +103,18 @@ cache_timeout = 3600 # 1 hour cache_timeout = 3600 ``` -## How STP Addresses the Intent Problem +## How Intent Addresses the Intent Problem -STP attacks intent loss through systematic capture and preservation. Rather than hoping developers document intent, STP makes it part of the natural workflow. +Intent attacks intent loss through systematic capture and preservation. Rather than hoping developers document intent, Intent makes it part of the natural workflow. ### Documentation Alongside Code -STP keeps documentation in the repository, versioned with the code: +Intent keeps documentation in the repository, versioned with the code: + ``` project/ ├── src/ # Implementation -└── stp/ +└── intent/ ├── prj/st/ # Steel threads (intent) ├── eng/tpd/ # Technical design (rationale) └── usr/ # User perspective (purpose) @@ -128,7 +134,7 @@ Each steel thread captures multi-level intent: ### Structured Templates That Prompt -STP templates ask the right questions at the right time: +Intent templates ask the right questions at the right time: ```markdown ## Objective @@ -145,7 +151,7 @@ The structure guides without constraining. You can't skip intent because the tem ### Living Intent Records -STP maintains intent at multiple levels: +Intent maintains intent at multiple levels: - **Technical Product Design**: System-wide architectural intent - **Steel Threads**: Feature-level implementation intent @@ -161,6 +167,7 @@ Each document serves a different temporal scope, from permanent architectural de ❌ **Poor**: "Using PostgreSQL for data storage" ✓ **Better**: "Chose PostgreSQL over MongoDB because: + - Need ACID compliance for financial data - Complex relationships require joins - Team expertise is stronger with SQL @@ -204,6 +211,7 @@ Intent isn't static. 
As you learn, update the documentation: ### 5. Use Progressive Detail Capture intent at multiple levels: + - **Thread level**: Overall objective and approach - **Task level**: Specific implementation choices - **Code level**: Inline comments for non-obvious decisions @@ -215,6 +223,7 @@ LLMs excel at pattern matching but lack true understanding. Without explicit int ### How LLMs Struggle Without Intent **Without Intent**: + ``` Developer: "Optimise this database query" LLM: [Suggests adding indexes, rewriting joins] @@ -222,6 +231,7 @@ Result: Query is faster but breaks business logic that depended on row order ``` **With Intent**: + ``` Developer: "Optimise this query that generates billing reports. Row order must be preserved for legal compliance. Current performance: 45s for monthly reports." LLM: [Suggests partitioning strategy that maintains order while improving performance] @@ -253,6 +263,7 @@ LLMs can transform rough notes into structured intent: **Your notes**: "need to fix login slow, maybe redis session?" **LLM-assisted documentation**: + ```markdown ## Objective Reduce login latency from current 3-5 seconds to under 500ms @@ -285,7 +296,7 @@ class RateLimiter: **Six months later**: "Why is the limit 100? Can we change it? What breaks if we do?" 
-#### After: With STP-Style Intent Capture +#### After: With Intent-Style Intent Capture ```markdown # ST0042: API Rate Limiting Implementation @@ -324,13 +335,15 @@ class RateLimiter: **Scenario**: Business wants to offer a premium tier with higher limits -**Without Intent**: +**Without Intent**: + - Developer guesses why 100 was chosen - Increases to 500 "to be safe" - Accidentally enables abuse patterns - Infrastructure costs spike **With Intent**: + - Developer understands the analysis behind 100 - Knows premium users also fit the usage pattern - Implements tier-based limiting: 100 (free), 200 (premium) @@ -353,7 +366,8 @@ class RateLimiter: ### Measuring the ROI -Teams using STP-style intent capture report: +Teams using Intent-style intent capture report: + - **50% reduction** in time spent understanding existing code - **75% fewer** "why was this done?" meetings - **90% faster** onboarding for new developers @@ -376,4 +390,4 @@ When intent becomes explicit, development transforms from archaeology to archite In our next post, we'll explore how this foundation of captured intent enables unprecedented collaboration with LLMs, turning AI assistants from code generators into true development partners. 
-[Continue to: LLM Collaboration with STP →](./0004-llm-collaboration-with-stp.md) +[Continue to: LLM Collaboration with Intent →](./0004-llm-collaboration-with-intent.md) diff --git a/docs/blog/0004-llm-collaboration-with-intent.md b/docs/blog/0004-llm-collaboration-with-intent.md new file mode 100644 index 0000000..fe6af41 --- /dev/null +++ b/docs/blog/0004-llm-collaboration-with-intent.md @@ -0,0 +1,659 @@ +--- +title: "LLM Collaboration with Intent" +date: "2025-07-08" +author: "Matthew Sinclair" +draft: false +word_count: 2274 +--- + +# LLM Collaboration with Intent: Multiplying Development Capabilities + +We've built a foundation of [captured intention](./0003-intent-capture-in-software-development.md) using [steel threads](./0002-the-steel-thread-methodology.md). Now let me show you exactly how this transforms your AI coding experience. + +## The Difference is Night and Day + +### Without Intent + +``` +You: "Help me add caching to the user service" +Claude: "Here's a simple Redis cache implementation..." +[Generic code that misses your actual needs] + +You: "No, we need to handle traffic spikes" +Claude: "Oh, here's a different approach..." +[Still not right] + +You: "Also, we have API rate limits" +Claude: "Ah, now I understand. Let me start over..." +[Finally getting closer after 3 attempts] +``` + +### With Intent + +``` +You: "I'm working on ST0042" [paste steel thread] +Claude: "I see you need caching for: + - API rate limit: 100 req/min + - Expected traffic: 10K req/s during sales + - Constraint: Redis cluster already deployed + +Here's a multi-layer cache with rate limiting, +burst handling, and automatic failover..." +[Exactly what you need, first try] +``` + +The difference? Intent gives AI the context to understand your actual problem, not just your immediate request. + +## Why AI Assistants Need Intent + +### The Token Economy + +Every conversation with an AI has a context limit. 
Without Intent, you waste tokens re-explaining: + +```markdown +# Typical conversation without Intent: +Tokens used on explanation: 2,000 +Tokens used on actual problem: 500 +Total: 2,500 tokens, mostly wasted + +# With Intent: +Tokens for steel thread: 500 +Tokens for actual problem: 2,000 +Total: 2,500 tokens, mostly productive +``` + +### Intent Keeps AI Focused + +**Problem**: Give AI your entire codebase, it gets lost +**Solution**: Give AI one steel thread, it stays focused + +```bash +# This overwhelms the AI: +$ find . -name "*.py" | xargs cat | wc -l +45,000 lines of code + +# This focuses the AI: +$ intent st show ST0042 +200 lines of structured context +``` + +### Real Example: Authentication Implementation + +I used Intent to build authentication with Claude: + +```markdown +# ST0042: Multi-Service Authentication + +## Context +- 5 microservices need shared auth +- Cannot use sessions (stateless requirement) +- Must support enterprise SSO +- Peak load: 10K concurrent users + +## Constraints +- Existing users in PostgreSQL +- 15-minute token expiry (security audit) +- Zero-downtime migration required +``` + +Claude immediately suggested: + +- JWT with refresh tokens (not sessions) +- Token relay pattern for microservices +- Gradual migration strategy +- Redis for token revocation + +No back-and-forth. No wrong assumptions. Just solutions that fit MY constraints. + +## The LLM Collaboration Challenge + +Even powerful LLMs face fundamental challenges in software development collaboration: + +### Context Window Constraints + +LLMs have finite context windows. Dumping your entire codebase exceeds these limits and creates noise. The challenge: How do you provide enough context without overwhelming the model? + +### Information Overload + +More context isn't always better. LLMs can get lost in irrelevant details, missing the crucial information buried in thousands of lines of code. Quality beats quantity. 
+ +### The Stale Context Problem + +Yesterday's context might mislead today's decisions. As code evolves, old assumptions become dangerous. Static documentation quickly becomes a liability rather than an asset. + +### Project Structure Complexity + +Explaining how different parts of your system interact requires more than showing code. LLMs need to understand relationships, dependencies, and architectural decisions. + +### Session Continuity + +Each new conversation starts fresh. Without systematic context management, you waste time re-explaining your project, and the LLM loses valuable understanding built in previous sessions. + +## How Intent Is Designed for LLM Collaboration + +Intent addresses each collaboration challenge through deliberate design choices: + +### The "Preamble to Claude" + +Our Technical Product Design starts with explicit instructions for LLMs: + +```markdown +## Preamble to Claude + +This document is a Technical Product Design (TPD) for the Intent system. When processing this document, please understand: + +1. This is a comprehensive technical specification... +2. The system is designed to facilitate collaboration between developers and LLMs... +``` + +This isn't just documentation – it's a handshake protocol between human intent and AI understanding. 
+ +### Structured Documentation That Fits LLM Thinking + +Intent templates mirror how LLMs process information: + +- **Clear hierarchies** that LLMs can navigate +- **Consistent patterns** that reduce parsing complexity +- **Explicit sections** for objectives, context, approach +- **Metadata frontmatter** for quick classification + +### Just-in-Time Context Loading + +Instead of overwhelming LLMs with everything, Intent enables focused context: + +```bash +# Load specific steel thread context +$ cat intent/prj/st/ST0042.md + +# Show current tasks for that thread +$ intent task list ST0042 + +# Check implementation status +$ intent status show ST0042 +``` + +Each command provides exactly the context needed for the current task. + +### The Information Flow Architecture + +``` +WIP (Current Focus) + │ + ├──▶ Steel Threads (Intent & Strategy) + │ │ + │ └──▶ Tasks (Granular Work) + │ + └──▶ Journal (Historical Context) +``` + +This flow ensures LLMs always have: + +1. Current focus (WIP) +2. Strategic context (Steel Threads) +3. Tactical details (Tasks) +4. Historical decisions (Journal) + +## Context Management Strategies + +Effective LLM collaboration requires strategic context management. Here's how Intent enables it: + +### Start with WIP (Work In Progress) + +The WIP document acts as a conversation starter: + +```markdown +# Work in Progress + +## Current Focus +Implementing authentication system (ST0042) +- Decided on JWT tokens over sessions +- Need to handle refresh token rotation +- Integrating with existing user service + +## Blockers +- Unclear how to handle multi-device login +``` + +This immediately orients the LLM to your current state and challenges. 
+ +### Use Steel Threads as Context Containers + +Each steel thread provides bounded context: + +```bash +# Share a complete context unit +$ cat intent/prj/st/ST0042.md | pbcopy +# Now paste into LLM conversation +``` + +The LLM receives: + +- Clear objectives +- Relevant constraints +- Design decisions +- Current progress + +### Progressive Context Loading + +Start minimal, add detail as needed: + +1. **Initial**: "Working on ST0042 - Authentication System" +2. **If needed**: Share the steel thread document +3. **For specifics**: Show relevant task details +4. **For history**: Reference journal entries + +This prevents context overload while ensuring completeness. + +### Task Status as Progress Indicators + +```bash +$ intent status show ST0042 +Steel Thread: ST0042 +Current Status: In Progress +Task Summary: + Total Tasks: 8 + - Done: 5 + - In Progress: 1 + - Todo: 2 +``` + +LLMs immediately understand what's complete and what needs attention. + +## Templates and Structure that Enhance LLM Effectiveness + +Intent templates aren't arbitrary – they're designed to match how LLMs process information. + +### Why Structure Matters to LLMs + +LLMs excel at pattern recognition. Consistent structure becomes a pattern they can leverage: + +```markdown +--- +status: In Progress +created: 20250308 +--- +# ST0042: Authentication System + +## Objective +[LLMs immediately understand this is the goal] + +## Context +[LLMs know to find background information here] + +## Approach +[LLMs expect implementation strategy here] +``` + +The predictable structure reduces cognitive load and improves comprehension. 
+ +### Frontmatter Metadata + +YAML frontmatter provides machine-readable context: + +```yaml +--- +status: In Progress # LLM knows work is active +created: 20250308 # LLM understands timeline +completed: # LLM sees this isn't done +author: Jane Smith # LLM knows who to reference +dependencies: [ST0038] # LLM understands relationships +--- +``` + +This metadata helps LLMs make contextual decisions without parsing prose. + +### Section Organisation for LLM Reasoning + +Intent sections follow a logical flow that mirrors problem-solving: + +1. **Objective**: What are we trying to achieve? +2. **Context**: Why does this matter? +3. **Approach**: How will we solve it? +4. **Tasks**: What specific work is needed? +5. **Implementation Notes**: What have we learned? +6. **Results**: What was the outcome? + +This progression helps LLMs understand not just the current state but the journey. + +### Consistent Formatting Patterns + +Intent uses consistent markers that LLMs can recognise: + +- `## Section Headers` for major divisions +- `- [ ] Task items` for work tracking +- `` ```language `` for code blocks +- `**Bold**` for emphasis +- `[Links](./file.md)` for relationships + +These patterns become navigational aids for LLM comprehension. + +## The Documentation-Implementation Feedback Loop + +Intent creates a virtuous cycle where documentation and implementation reinforce each other, with LLMs participating at every stage. + +### Documentation Drives Implementation + +```mermaid +Documentation → LLM Understanding → Better Suggestions → Quality Code +``` + +When you start with clear documentation: + +1. LLMs understand the complete context +2. Suggestions align with documented intent +3. Generated code fits the design +4. 
Implementation matches expectations + +### Implementation Updates Documentation + +As you code, discoveries flow back: + +```markdown +## Implementation Notes +[2024-03-15] Discovered rate limiting issue with auth tokens +[2024-03-16] Switched to sliding window approach +[2024-03-17] Added token bucket for burst capacity +``` + +LLMs learn from these updates, improving future suggestions. + +### Real Example: The Feedback Loop in Action + +**Initial Documentation**: + +```markdown +## Approach +Implement simple cache with 1-hour TTL +``` + +**LLM Suggestion**: "Consider cache invalidation strategy for multi-region deployment" + +**Updated Documentation**: + +```markdown +## Approach +Implement cache with 1-hour TTL +- Use event-based invalidation for consistency +- Redis pub/sub for multi-region coordination +``` + +**Result**: Better implementation informed by LLM insight, captured in documentation. + +### The Multiplier Effect + +Each cycle improves both documentation and code: + +- **Clearer intent** → Better LLM suggestions +- **Better suggestions** → Improved implementation +- **Improved implementation** → Refined documentation +- **Refined documentation** → Even clearer intent + +This isn't just about current development – it's about building a knowledge base that makes every future interaction more effective. + +## Claude Code Sub-Agents: Taking Collaboration Further + +Intent v2.1.0 includes integration with Claude Code's sub-agent system, revolutionizing AI-assisted development. + +### The Problem with Session Amnesia + +Every new Claude session typically means: + +- Re-explaining your project structure +- Describing Intent and steel threads +- Teaching command syntax again +- Clarifying team conventions + +It's like working with a brilliant colleague who has amnesia every morning. 
+ +### Intent Agent: Persistent Knowledge + +With one command: + +```bash +intent agents install intent +``` + +Claude permanently understands: + +- Steel thread methodology +- All Intent commands and workflows +- Project organization patterns +- Best practices and conventions + +### The Transformation in Practice + +**Without Intent agent:** + +``` +You: "I need to add user notifications" +Claude: "I'll help you add notifications. What framework are you using?" +You: "We use Intent for project management..." +[5 minutes explaining] +Claude: "Where should I put the notification code?" +You: "First create a steel thread with intent st new..." +[More back and forth] +``` + +**With Intent agent:** + +``` +You: "I need to add user notifications" +Claude: "I'll help you create a steel thread for notifications: + + intent st new 'User Notification System' + +This creates ST0044. Let's document the intent: +- What types of notifications (email, push, in-app)? +- Real-time requirements? +- User preferences and opt-out needs? + +I'll help structure this properly and create Backlog tasks." +``` + +The conversation starts at a much higher level because Claude already knows the foundation. + +### Custom Project Agents + +Beyond the Intent agent, create project-specific agents: + +```bash +# Your team's conventions +$ cat intent/agents/team-standards/agent.md +--- +name: team-standards +--- +You know our standards: +- API versioning: /api/v{n}/{resource} +- Error format: RFC 7807 Problem Details +- Auth: JWT in Authorization: Bearer {token} +- Testing: 80% coverage minimum +- PR process: feature/* branches, 2 approvals +``` + +Now every Claude session knows YOUR specific rules without repeated explanation. 
+ +### Specialized Global Agents + +Intent v2.2.0 includes specialized agents for common development scenarios: + +#### Elixir Agent + +For Elixir developers, the elixir agent provides: + +- Pure functional programming patterns and Usage Rules +- Ash and Phoenix framework expertise +- Idiomatic Elixir code review and refactoring +- Railway-oriented error handling patterns + +```bash +intent agents install elixir +``` + +#### Socrates Agent - CTO Review Mode + +The socrates agent facilitates technical decision-making through Socratic dialog: + +```bash +intent agents install socrates +``` + +This agent creates structured conversations between two personas: + +- **Socrates (CTO)**: Strategic thinker with 30+ years experience +- **Plato (Tech Lead)**: Implementation expert with deep technical knowledge + +Perfect for: + +- Architecture decisions (microservices vs monolith) +- Technology selection (build vs buy, framework choices) +- Complex refactoring strategies +- API design and integration planning + +Example usage: + +``` +You: "I need to decide between PostgreSQL and DynamoDB" +Claude (with socrates agent): "I'll facilitate a CTO Review Mode dialog: + +**Socrates (CTO):** What's driving this database decision? Are we +optimizing for consistency or scale? + +**Plato (Tech Lead):** We're expecting 1M users in year one with +burst traffic during sales. The team has PostgreSQL expertise... + +[Dialog continues exploring trade-offs, revealing hidden complexities] + +**Recommendation:** PostgreSQL with read replicas and caching layer +- Leverages team expertise +- Handles expected scale with proven patterns +- Avoids NoSQL learning curve during critical growth phase" +``` + +The socrates agent transforms rubber-duck debugging into structured technical exploration, creating decision documentation that explains not just what was decided, but why. + +### The Multiplier Effect + +Intent agents don't just save time – they change what's possible: + +1. 
**Deeper Conversations**: Skip basics, discuss architecture +2. **Consistent Practices**: AI always follows your standards +3. **Knowledge Preservation**: Team wisdom encoded in agents +4. **Onboarding Acceleration**: New devs get AI that knows your ways + +## Future Opportunities for LLM Integration + +We're just scratching the surface of what's possible when development methodologies embrace LLM collaboration. + +### Automated Documentation Validation + +LLMs could continuously validate documentation against implementation: + +```bash +$ intent validate ST0042 +Checking documentation-implementation alignment... +⚠ Implementation includes rate limiting not mentioned in approach +⚠ Task list shows 8 items but only 6 are documented +✓ All objectives have corresponding implementation +``` + +### LLM-Powered Steel Thread Creation + +Imagine describing a feature and having an LLM draft the steel thread: + +``` +You: "We need to add data export functionality for compliance" + +LLM: "I'll create a steel thread for this. 
Based on your project: +- Objective: Enable GDPR-compliant data export +- Context: Legal requirement, 30-day deadline +- Approach: Queue-based async processing +- Tasks: [generates task breakdown]" +``` + +### Intelligent Context Selection + +Future Intent could automatically select relevant context: + +```bash +$ intent context "working on authentication" +Relevant context loaded: +- ST0042: Authentication System (current) +- ST0038: User Service (dependency) +- Journal: 2024-03-15 auth decisions +- Related tasks: task-45, task-46 +``` + +### The MCP Revolution + +Anthropic's Model Context Protocol opens new possibilities: + +- LLMs directly executing Intent commands +- Autonomous steel thread management +- Real-time documentation updates +- Integrated development environments + +## Practical LLM Collaboration Workflow + +### Morning Standup with Your AI + +```bash +# Start your day by loading context +$ cat intent/wip.md +Current focus: Implementing auth refresh tokens (ST0042) +Blocker: Unsure about multi-device logout + +$ intent st show ST0042 | pbcopy +# Paste to Claude/ChatGPT +``` + +**You**: "I'm stuck on multi-device logout with our JWT strategy" + +**Claude**: "I see from ST0042 you're using 15-min access tokens with 7-day refresh tokens. For multi-device logout with your constraints: + +1. Add a token family ID to track related tokens +2. Store revoked families in Redis (you already have it deployed) +3. Check revocation on token refresh, not every request (performance) +Here's the implementation..." + +### Discovering Your Own Code + +Six months later: + +```bash +$ intent st list | grep cache +ST0015 [Completed] Implement multi-layer cache for API protection +ST0023 [Completed] Add cache warming for user profiles +ST0031 [In Progress] Migrate cache to Redis cluster + +$ intent st show ST0015 +# Aha! 
That's why the cache is so complex +# Shows: API limits, traffic patterns, decisions made +``` + +### Team Knowledge Sharing + +New developer joins: + +```bash +# Instead of: "Let me explain our auth system..." +$ intent st show ST0042 > auth-context.md +$ echo "Start here. This explains everything." + +# They read: +- Why JWT over sessions (microservices) +- Why 15-min expiry (security audit) +- Why Redis for revocation (already in stack) +- Implementation gotchas discovered +``` + +The new developer is productive in hours, not weeks. + +## Transforming Development Through Collaboration + +Intent doesn't just make LLM collaboration possible – it makes it powerful. By providing structure, context, and clear intention, Intent transforms LLMs from code generators into true development partners. + +The future of software development isn't human or AI – it's human and AI, working together with shared understanding. Intent provides the foundation for that collaboration. + +Ready to put this into practice? Our next post will guide you through setting up Intent in your own projects and creating your first intention-aware, LLM-collaborative development workflow. 
+ +[Continue to: Getting Started with Intent →](./0005-getting-started-with-intent.md) diff --git a/stp/doc/blog/0005-getting-started-with-stp.md b/docs/blog/0005-getting-started-with-intent.md similarity index 64% rename from stp/doc/blog/0005-getting-started-with-stp.md rename to docs/blog/0005-getting-started-with-intent.md index 54a574a..48927ec 100644 --- a/stp/doc/blog/0005-getting-started-with-stp.md +++ b/docs/blog/0005-getting-started-with-intent.md @@ -1,152 +1,158 @@ --- -title: "Getting Started with STP" +title: "Getting Started with Intent" date: "2025-07-08" author: "Matthew Sinclair" draft: false word_count: 2489 --- -# Getting Started with STP: Your Practical Implementation Guide +# Getting Started with Intent: Your Practical Implementation Guide -After exploring the [philosophy](./0000-motivation-for-stp.md) and [methodology](./0002-the-steel-thread-methodology.md) behind STP, you're ready to implement it in your own projects. This guide walks you through installation, daily workflow, and practical tips for success. +After exploring the [philosophy](./0000-motivation-for-intent.md) and [methodology](./0002-the-steel-thread-methodology.md) behind Intent, you're ready to implement it in your own projects. This guide walks you through installation, daily workflow, and practical tips for success. -What makes this guide unique? We used STP itself to manage the creation of this blog series. Throughout this post, I'll share real command outputs and workflow states from our actual process, giving you an authentic view of STP in action. +What makes this guide unique? We used Intent itself to manage the creation of this blog series. Throughout this post, I'll share real command outputs and workflow states from our actual process, giving you an authentic view of Intent in action. 
## Installation and Setup ### Prerequisites -STP requires minimal dependencies: +Intent requires minimal dependencies: + - Bash shell (version 4.0+) - Git for version control - A text editor that handles Markdown - Optional: Node.js for Backlog.md integration -### Installing STP +### Installing Intent 1. **Clone the repository**: + ```bash git clone https://github.com/matthewsinclair/stp.git cd stp ``` -2. **Add STP to your PATH**: +2. **Add Intent to your PATH**: + ```bash -export PATH="$PATH:$(pwd)/stp/bin" +export PATH="$PATH:$(pwd)/intent/bin" # Add to your shell profile for persistence ``` 3. **Verify installation**: + ```bash $ stp --version -STP version 1.2.1 +Intent version 1.0.0 ``` ### Installing Backlog.md -STP integrates beautifully with Backlog.md for task management: +Intent integrates beautifully with Backlog.md for task management: ```bash # Install Backlog globally npm install -g @backlog/cli # Initialize in your project -stp bl init +intent bl init ``` -### Bootstrapping STP in Your Project +### Bootstrapping Intent in Your Project **For new projects**: + ```bash mkdir my-project && cd my-project git init -stp init +intent init +``` + +If Claude Code is installed, Intent will offer to install the Intent agent: + +``` +Claude Code detected! +Would you like to install the Intent sub-agent? [Y/n] ``` -This creates the STP directory structure: +Say yes! This gives Claude instant understanding of Intent methodology. + +This creates the Intent directory structure: + ``` my-project/ -├── stp/ +├── intent/ │ ├── _templ/ # Templates -│ ├── bin/ # STP scripts +│ ├── bin/ # Intent scripts │ ├── doc/ # Documentation │ ├── eng/ # Engineering docs │ │ └── tpd/ # Technical Product Design │ ├── prj/ # Project management -│ │ ├── st/ # Steel threads (directories in v1.2.1+) -│ │ │ ├── ST0001/ # Example steel thread -│ │ │ │ ├── info.md -│ │ │ │ ├── design.md -│ │ │ │ └── ... 
-│ │ │ └── steel_threads.md +│ │ ├── st/ # Steel threads +│ │ ├── journal.md │ │ └── wip.md │ └── usr/ # User documentation └── backlog/ # Backlog.md tasks ``` **For existing projects**: + ```bash cd existing-project -stp init --integrate +intent init --integrate ``` -This adds STP without disrupting your current structure. +This adds Intent without disrupting your current structure. ## Basic Commands and Workflow -### Core STP Commands +### Core Intent Commands **Steel Thread Management**: + ```bash # Create a new steel thread -$ stp st new +$ intent st new Enter title: Implement user authentication Created: ST0015 # List all steel threads -$ stp st list +$ intent st list ID | Title | Status | Created -------|------------------------|-------------|------------ ST0015 | Implement user auth... | Not Started | 2025-07-08 ST0014 | Directory Structure... | In Progress | 2025-03-20 -# Show specific steel thread (info.md by default) -$ stp st show ST0015 - -# Show specific file in steel thread -$ stp st show ST0015 design # View design decisions -$ stp st show ST0015 impl # View implementation notes -$ stp st show ST0015 all # View all files - -# Edit specific files -$ stp st edit ST0015 # Edit info.md (default) -$ stp st edit ST0015 tasks # Edit tasks.md +# Show specific steel thread +$ intent st show ST0015 # Sync steel thread index -$ stp st sync --write +$ intent st sync --write ``` **Task Management with Backlog Integration**: + ```bash # Create tasks linked to steel threads -$ stp task create ST0015 "Research auth libraries" +$ intent task create ST0015 "Research auth libraries" Created task task-59 # List tasks for a steel thread -$ stp task list ST0015 +$ intent task list ST0015 Tasks for ST0015: ================ task-59 [todo] Research auth libraries # Use Backlog commands via wrapper -$ stp bl list # Avoids git errors -$ stp bl board # View Kanban board +$ intent bl list # Avoids git errors +$ intent bl board # View Kanban board ``` **Status Synchronisation**: 
+ ```bash # Check if thread status matches task completion -$ stp status show ST0015 +$ intent status show ST0015 Steel Thread: ST0015 Current Status: Not Started Task Summary: @@ -155,16 +161,17 @@ Task Summary: Recommended Status: Not Started # Sync status based on task completion -$ stp status sync ST0015 +$ intent status sync ST0015 ``` -## Daily Workflow with STP +## Daily Workflow with Intent ### Starting Your Day 1. **Check your WIP document**: + ```bash -$ cat stp/prj/wip.md +$ cat intent/prj/wip.md # Work in Progress ## Current Focus @@ -174,8 +181,9 @@ Working on authentication system (ST0015) ``` 2. **Review active steel threads**: + ```bash -$ stp st list --status in_progress +intent st list --status in_progress ``` ### Creating Your First Steel Thread @@ -183,11 +191,11 @@ $ stp st list --status in_progress Let's walk through creating a real steel thread: ```bash -$ stp st new +$ intent st new Enter title: Add user profile editing Created: ST0016 -$ cd stp/prj/st +$ cd intent/prj/st $ edit ST0016.md ``` @@ -220,16 +228,16 @@ profiles after registration. This impacts user retention. 
Create tasks for implementation: ```bash -$ stp task create ST0016 "Design profile edit API" +$ intent task create ST0016 "Design profile edit API" Created task task-60 -$ stp task create ST0016 "Implement backend validation" +$ intent task create ST0016 "Implement backend validation" Created task task-61 -$ stp task create ST0016 "Create profile edit UI" +$ intent task create ST0016 "Create profile edit UI" Created task task-62 -$ stp task create ST0016 "Add audit logging" +$ intent task create ST0016 "Add audit logging" Created task task-63 ``` @@ -242,7 +250,7 @@ $ edit "task-60 - ST0016-Design-profile-edit-API.md" # Change status: To Do -> In Progress # Check progress -$ stp task list ST0016 +$ intent task list ST0016 Tasks for ST0016: ================ task-60 [in_progress] Design profile edit API @@ -251,28 +259,28 @@ task-62 [todo] Create profile edit UI task-63 [todo] Add audit logging # Update steel thread status -$ stp status sync ST0016 +$ intent status sync ST0016 Updated ST0016 status from 'Not Started' to 'In Progress' ``` -## Case Study: How We Used STP to Write This Blog Series +## Case Study: How We Used Intent to Write This Blog Series -This blog series itself demonstrates STP in action. Let me show you the actual workflow we used. +This blog series itself demonstrates Intent in action. Let me show you the actual workflow we used. ### The Steel Thread We started with ST0013: ```bash -$ stp st show ST0013 -# ST0013: STP Blog Post Series +$ intent st show ST0013 +# ST0013: Intent Blog Post Series - **Status**: Not Started → In Progress - **Created**: 2025-03-11 - **Author**: Matthew Sinclair ## Objective -Create a series of blog posts about the Steel Thread Process (STP) +Create a series of blog posts about the Intent (Intent) to explain its purpose, philosophy, and implementation. ``` @@ -281,7 +289,7 @@ to explain its purpose, philosophy, and implementation. We created 52 tasks across 7 blog posts. 
Here's our initial task list: ```bash -$ stp bl list +$ intent bl list To Do: task-6 - ST0013 - Research existing docs for blog 0000 task-7 - ST0013 - Write introduction section for blog 0000 @@ -311,10 +319,10 @@ $ edit "task-7 - ST0013-Write-introduction.md" After completing blog post 0003: ```bash -$ stp task list ST0013 | grep -c "\[done\]" +$ intent task list ST0013 | grep -c "\[done\]" 29 -$ stp status show ST0013 +$ intent status show ST0013 Steel Thread: ST0013 Current Status: Not Started Task Summary: @@ -330,28 +338,28 @@ Recommended Status: In Progress 1. **Granular tasks maintain momentum**: Each blog section as a separate task meant constant progress 2. **Status synchronisation reveals truth**: The mismatch between "Not Started" and 29 completed tasks showed we needed to sync 3. **Templates guide consistency**: Every blog post followed the same task pattern -4. **Meta-documentation is powerful**: Using STP to document STP creation provides authentic examples +4. **Meta-documentation is powerful**: Using Intent to document Intent creation provides authentic examples -## Integrating STP into Your Existing Process +## Integrating Intent into Your Existing Process -STP enhances rather than replaces your current methodology: +Intent enhances rather than replaces your current methodology: ### With Agile/Scrum - **User Stories** → Map to steel threads - **Sprint Planning** → Break threads into Backlog tasks -- **Daily Standups** → Reference `stp task list` output +- **Daily Standups** → Reference `intent task list` output - **Sprint Review** → Show completed threads ### With Kanban - **Work Items** → Steel threads flow across board - **WIP Limits** → Limit in-progress threads -- **Flow Metrics** → Track via `stp status` +- **Flow Metrics** → Track via `intent status` ### Gradual Adoption Strategy -1. **Week 1**: Start with WIP and basic task tracking +1. **Week 1**: Start with WIP and journal only 2. **Week 2**: Create first steel thread for new feature 3. 
**Week 3**: Add Backlog integration 4. **Week 4**: Full workflow with status sync @@ -363,17 +371,20 @@ Teams report smooth adoption with no workflow disruption. ### Steel Thread Granularity ✓ **Good Steel Thread Size**: + - Completable in 1-2 weeks - Delivers visible value - Has clear success criteria - 5-15 associated tasks ❌ **Too Large**: + - "Redesign entire application" - No clear endpoint - Dozens of tasks ❌ **Too Small**: + - "Fix typo in README" - Single task - No strategic value @@ -390,20 +401,49 @@ Steel Thread: Add user notifications └── Documentation: Update API docs ``` +### Managing Claude Code Agents + +If you're using Claude Code, Intent's agent system supercharges your AI collaboration: + +```bash +# Check available agents +$ intent agents list +Available Agents: + intent - Intent-aware development assistant [NOT INSTALLED] + elixir - Elixir code doctor with Usage Rules [NOT INSTALLED] + +# Install the Intent agent +$ intent agents install intent +Installing agent: intent +Installed successfully + +# Verify installation +$ intent agents status +intent [OK] +``` + +Now when you paste steel threads to Claude, it already understands: +- How to create new steel threads +- Intent command syntax +- Project organization patterns +- Backlog integration + ### Essential Best Practices -1. **Always use the `stp bl` wrapper**: Prevents git errors +1. **Always use the `intent bl` wrapper**: Prevents git errors + ```bash # Good - $ stp bl list + $ intent bl list # Avoid $ backlog list # May cause git conflicts ``` 2. **Consistent task naming**: `ST#### - Description` + ```bash - $ stp task create ST0016 "Implement caching layer" + $ intent task create ST0016 "Implement caching layer" # Creates: task-64 - ST0016-Implement-caching-layer.md ``` @@ -413,58 +453,64 @@ Steel Thread: Add user notifications - Capture decisions in real-time 4. **LLM collaboration pattern**: + ```bash # 1. 
Share context - $ cat stp/prj/st/ST0016.md | pbcopy + $ cat intent/prj/st/ST0016.md | pbcopy # 2. Get LLM help "I'm working on ST0016, need help with..." # 3. Update documentation - $ edit stp/prj/st/ST0016.md + $ edit intent/prj/st/ST0016.md # Add new insights to Implementation Notes ``` -5. **Daily progress tracking**: +5. **Daily journal habit**: + ```bash - $ stp bl task edit task-16 --status Done - # Task marked as completed - # Update steel thread status if needed + $ edit intent/prj/journal.md + ## 2025-07-08 + - Completed profile edit API (ST0016) - Discovered rate limiting issue - Decision: Implement token bucket ``` ## Resources for Further Learning -### STP Documentation -- **Reference Guide**: `stp/usr/reference_guide.md` +### Intent Documentation + +- **Reference Guide**: `intent/usr/reference_guide.md` - **Command Help**: `stp <command> --help` -- **Template Library**: `stp/_templ/` +- **Template Library**: `intent/_templ/` + +### Example Projects Using Intent -### Example Projects Using STP -- STP itself (meta!) +- Intent itself (meta!) - [Community showcase](https://github.com/stp-community) ### Related Tools and Integrations + - [Backlog.md](https://backlog.md) - Task management - [Claude Code](https://claude.ai/code) - LLM pair programming - Git hooks for automation ### Getting Help + - GitHub Issues: Report bugs and request features - Discussions: Share patterns and get advice - Wiki: Community-contributed guides -## Start Your STP Journey Today +## Start Your Intent Journey Today -You now have everything needed to implement STP in your projects. Start small: +You now have everything needed to implement Intent in your projects. Start small: -1. Install STP +1. Install Intent 2. Create your first steel thread 3. Break it into tasks 4. Experience the clarity -Remember: STP isn't about perfect documentation. It's about capturing enough intention to make future development decisions with confidence. 
+Remember: Intent isn't about perfect documentation. It's about capturing enough intention to make future development decisions with confidence. The journey from confusion to clarity starts with a single steel thread. diff --git a/stp/doc/blog/0006-next-steps-and-future-work.md b/docs/blog/0006-next-steps-and-future-work.md similarity index 76% rename from stp/doc/blog/0006-next-steps-and-future-work.md rename to docs/blog/0006-next-steps-and-future-work.md index 8c33b89..1fadb96 100644 --- a/stp/doc/blog/0006-next-steps-and-future-work.md +++ b/docs/blog/0006-next-steps-and-future-work.md @@ -6,19 +6,20 @@ draft: false word_count: 1608 --- -# Next Steps and Future Work: The Evolution of STP +# Next Steps and Future Work: The Evolution of Intent -Through this blog series, we've explored the [motivation](./0000-motivation-for-stp.md), [methodology](./0002-the-steel-thread-methodology.md), and [practical implementation](./0005-getting-started-with-stp.md) of STP. Now we look forward – where is STP heading, and how can you be part of its evolution? +Through this blog series, we've explored the [motivation](./0000-motivation-for-intent.md), [methodology](./0002-the-steel-thread-methodology.md), and [practical implementation](./0005-getting-started-with-intent.md) of Intent. Now we look forward – where is Intent heading, and how can you be part of its evolution? -This final post examines the current state of STP, explores planned enhancements, and shares our vision for the future of intention-aware development. Whether you're considering adopting STP or already using it, this roadmap shows where we're going together. +This final post examines the current state of Intent, explores planned enhancements, and shares our vision for the future of intention-aware development. Whether you're considering adopting Intent or already using it, this roadmap shows where we're going together. 
-## Current State of STP +## Current State of Intent -### What STP Delivers Today +### What Intent Delivers Today -STP has evolved from concept to practical tool, currently offering: +Intent has evolved from concept to practical tool, currently offering: **Core Features**: + - Steel thread management with full lifecycle tracking - Integrated Backlog.md for granular task management - Automatic status synchronisation based on task completion @@ -28,14 +29,16 @@ STP has evolved from concept to practical tool, currently offering: - LLM-optimised documentation structure **Key Strengths**: + - **Lightweight**: Simple bash scripts and markdown files - **Flexible**: Adapts to any development methodology - **Practical**: Solves real documentation and context problems -- **Proven**: Used to build STP itself (and write this blog series!) +- **Proven**: Used to build Intent itself (and write this blog series!) ### Early Adoption Insights -Teams using STP report: +Teams using Intent report: + - **Reduced onboarding time**: New developers productive 50% faster - **Better LLM interactions**: More relevant suggestions, fewer iterations - **Improved project visibility**: Clear status without status meetings @@ -43,7 +46,8 @@ Teams using STP report: ### Current Limitations -We're honest about what STP doesn't yet do: +We're honest about what Intent doesn't yet do: + - No GUI (command-line only) - Limited reporting capabilities - Manual setup required @@ -52,27 +56,30 @@ We're honest about what STP doesn't yet do: These limitations guide our development priorities. 
-## Lessons Learned from Building STP +## Lessons Learned from Building Intent ### The Power of Dogfooding -Using STP to build STP revealed crucial insights: +Using Intent to build Intent revealed crucial insights: + - **Templates need flexibility**: Rigid structures frustrate users - **Granularity matters**: Too fine and it's overhead, too coarse and you lose visibility -- **Integration beats isolation**: The Backlog.md integration multiplied STP's value +- **Integration beats isolation**: The Backlog.md integration multiplied Intent's value - **Simplicity wins**: Every complex feature we removed improved adoption ### Unexpected Benefits Some outcomes surprised us: + - **Journal as team memory**: Daily entries became invaluable for debugging months later - **WIP as conversation starter**: The simple WIP doc improved team communication - **Steel threads as onboarding tool**: New developers could understand project structure instantly -- **Meta-documentation power**: Using STP for non-code projects (like this blog series) proved its versatility +- **Meta-documentation power**: Using Intent for non-code projects (like this blog series) proved its versatility ### What Didn't Work Honesty about failures improves the tool: + - **Automated git commits**: Too magical, removed user control - **Complex status rules**: Simple percentage-based sync worked better - **Mandatory fields**: Flexibility trumped enforced completeness @@ -83,12 +90,14 @@ Honesty about failures improves the tool: ### Q1 2025: Foundation Enhancements **Configuration System** (March 2025) + - User-configurable defaults for templates - Project-specific settings files - Environment variable support - Custom command aliases **Enhanced Reporting** (April 2025) + - Progress dashboards - Velocity tracking - Task burndown charts @@ -97,13 +106,15 @@ Honesty about failures improves the tool: ### Q2 2025: Team Collaboration **Multi-user Support** (May 2025) + - User attribution in steel threads - Team member 
assignment - Collaborative editing workflows - Merge conflict resolution **Integration APIs** (June 2025) -- RESTful API for STP operations + +- RESTful API for Intent operations - Webhooks for status changes - External tool integration points - Programmatic access to all features @@ -111,12 +122,14 @@ Honesty about failures improves the tool: ### Q3 2025: Advanced Features **LLM Integration Suite** (July 2025) + - Native Claude MCP support - Context window optimisation - Automated summarisation - Intent validation checks **Visual Interface** (August 2025) + - Web-based dashboard - Steel thread visualisation - Dependency graphs @@ -125,12 +138,14 @@ Honesty about failures improves the tool: ### Q4 2025: Enterprise Ready **Scalability** (October 2025) + - Performance optimisations for 1000+ threads - Distributed team support - Archive and retrieval system - Advanced search capabilities **Security and Compliance** (November 2025) + - Role-based access control - Audit trails - Compliance reporting @@ -138,67 +153,74 @@ Honesty about failures improves the tool: ## Integration Opportunities -STP's design philosophy embraces integration rather than isolation. Here's how we're building bridges to your existing tools and workflows. +Intent's design philosophy embraces integration rather than isolation. Here's how we're building bridges to your existing tools and workflows. 
### Development Environment Integration **IDE Plugins** (In Development) -- **VS Code Extension**: + +- **VS Code Extension**: - Steel thread navigation in sidebar - Quick commands from command palette - Inline task status indicators - Template snippets - **IntelliJ/WebStorm Plugin**: - - Project tool window for STP + - Project tool window for Intent - Integrated task management - Refactoring support for thread IDs **Editor Support**: + ```vim -" Example .vimrc for STP -nnoremap <leader>st :!stp st show <C-R><C-W><CR> -nnoremap <leader>tl :!stp task list <C-R><C-W><CR> +" Example .vimrc for Intent +nnoremap <leader>st :!intent st show <C-R><C-W><CR> +nnoremap <leader>tl :!intent task list <C-R><C-W><CR> ``` ### CI/CD Pipeline Integration **Build System Hooks**: + ```yaml # GitHub Actions example -- name: Validate STP Structure +- name: Validate Intent Structure run: stp validate - name: Generate Release Notes run: stp release-notes --from=${{ github.event.before }} - name: Update Thread Status - run: stp status sync --all + run: intent status sync --all ``` **Pre-commit Hooks**: + ```bash #!/bin/bash # .git/hooks/pre-commit stp validate || exit 1 -stp status check --fail-on-mismatch +intent status check --fail-on-mismatch ``` ### Project Management Tools **Jira Integration** (Planned): + - Bidirectional sync between issues and threads - Status mapping configuration - Custom field support - Automated thread creation from epics **GitHub/GitLab Integration**: + - Pull request templates with thread links - Issue templates from steel threads - Automated PR descriptions - Status badges in README **Slack/Teams Notifications**: + ```javascript // Example webhook integration { @@ -216,12 +238,14 @@ stp status check --fail-on-mismatch ### Analytics and Visualisation **Grafana Dashboards**: + - Thread completion metrics - Team velocity tracking - Task distribution analysis - Intent preservation score **Power BI/Tableau**: + - Export connectors for business reporting - Custom 
visualisations for thread dependencies - Resource allocation views @@ -229,10 +253,11 @@ stp status check --fail-on-mismatch ### LLM Platform Integration **Claude MCP (Model Context Protocol)**: + ```json { "name": "stp-context", - "description": "Provides STP context to Claude", + "description": "Provides Intent context to Claude", "capabilities": { "thread_access": true, "task_management": true, @@ -242,34 +267,39 @@ stp status check --fail-on-mismatch ``` **OpenAI Custom GPTs**: -- STP-aware assistants + +- Intent-aware assistants - Thread-based context injection - Automated documentation generation **Local LLM Support**: + - Ollama integration for privacy - Custom prompts for different models - Context window optimisation -## Contributing to STP +## Contributing to Intent -STP thrives on community contributions. Here's how you can help shape its future. +Intent thrives on community contributions. Here's how you can help shape its future. ### Ways to Contribute **Code Contributions**: + - Bug fixes and improvements - New command implementations - Integration modules - Performance optimisations **Documentation**: + - Improve existing guides - Write tutorials - Share case studies - Translate documentation **Templates and Patterns**: + - Industry-specific templates - Methodology adaptations - Workflow patterns @@ -278,19 +308,22 @@ STP thrives on community contributions. Here's how you can help shape its future ### Contribution Workflow 1. **Fork and Clone**: + ```bash -git clone https://github.com/matthewsinclair/stp.git +git clone https://github.com/yourusername/stp.git cd stp ``` 2. **Create a Steel Thread**: + ```bash -stp st new +intent st new # Title: Add GitHub integration # Creates: ST0017 ``` 3. **Document Your Intent**: + ```markdown ## Objective Enable direct GitHub issue creation from steel threads @@ -305,14 +338,16 @@ Users want seamless integration with GitHub workflow ``` 4. 
**Implement with Tasks**: + ```bash -stp task create ST0017 "Research GitHub API options" -stp task create ST0017 "Implement issue creation" -stp task create ST0017 "Add status sync" -stp task create ST0017 "Write integration tests" +intent task create ST0017 "Research GitHub API options" +intent task create ST0017 "Implement issue creation" +intent task create ST0017 "Add status sync" +intent task create ST0017 "Write integration tests" ``` 5. **Submit PR**: + - Reference your steel thread - Include task completion status - Document design decisions @@ -320,18 +355,21 @@ stp task create ST0017 "Write integration tests" ### Community Guidelines **Code Style**: + - Follow existing patterns - Include tests - Document public APIs - Use meaningful commit messages **Documentation Standards**: + - Clear, concise writing - Practical examples - British English - Active voice **Review Process**: + - All PRs reviewed within 48 hours - Constructive feedback encouraged - Focus on intent preservation @@ -340,10 +378,11 @@ stp task create ST0017 "Write integration tests" ### Building Extensions **Custom Commands**: + ```bash #!/bin/bash -# stp/bin/stp-mycommand -source "${STP_HOME}/lib/common.sh" +# intent/bin/stp-mycommand +source "${Intent_HOME}/lib/common.sh" mycommand_function() { # Your implementation @@ -351,8 +390,9 @@ mycommand_function() { ``` **Template Creation**: + ```markdown -<!-- stp/_templ/industry/healthcare-thread.md --> +<!-- intent/_templ/industry/healthcare-thread.md --> --- status: Not Started compliance: HIPAA @@ -369,14 +409,16 @@ risk_assessment: Required ### Community Resources -Here are some things that will help build out a community of use around STP (note: this is all speculative and very much future todo). +Here are some things that will help build out a community of use around Intent (note: this is all speculative and very much future todo). 
**Getting Help**: + - GitHub Discussions for questions - Discord for real-time chat - Stack Overflow tag: `steel-thread-project` **Showcasing Your Work**: + - Community showcase repository - Monthly spotlight features - Conference talk opportunities @@ -386,31 +428,35 @@ Here are some things that will help build out a community of use around STP (not Here are some genuinely crazy ideas for where this can go (note: this is even more speculative and very much the realm of fantasy, as least as of today). -### STP as Development Standard +### Intent as Development Standard -We envision STP becoming the de facto standard for intention-aware development: +We envision Intent becoming the de facto standard for intention-aware development: **Industry Adoption**: -- Financial services using STP for audit trails + +- Financial services using Intent for audit trails - Healthcare tracking decision rationale - Government preserving project knowledge - Startups maintaining context through pivots **Educational Integration**: + - University courses teaching intent-first development -- Bootcamps including STP in curriculum +- Bootcamps including Intent in curriculum - Professional certifications available -- Open courseware using STP +- Open courseware using Intent ### The Next Generation of Development **AI-Native Workflows**: + - LLMs as first-class development partners - Intent becomes primary, code secondary - Natural language project specifications - Automated implementation from intentions **Knowledge Preservation**: + - Organisational memory that survives staff changes - Searchable decision databases - Intent graphs showing project evolution @@ -419,12 +465,14 @@ We envision STP becoming the de facto standard for intention-aware development: ### Long-term Vision **2027: Universal Adoption** -- STP integrated into major IDEs + +- Intent integrated into major IDEs - Standard practice in Fortune 500 - 1M+ active users - Native cloud platform support **2030: Paradigm Shift** + - 
Intent-first becomes default methodology - Code generation from steel threads - AI validates against original intent @@ -440,8 +488,8 @@ The future of software development is intention-aware, and it starts with your n --- -*Thank you for joining us on this journey through STP. From understanding [why intention matters](./0000-motivation-for-stp.md) to envisioning the future of development, you now have the knowledge to transform how your team builds software.* +*Thank you for joining us on this journey through Intent. From understanding [why intention matters](./0000-motivation-for-intent.md) to envisioning the future of development, you now have the knowledge to transform how your team builds software.* -*Ready to start? [Install STP](./0005-getting-started-with-stp.md#installation-and-setup) and create your first steel thread today.* +*Ready to start? [Install Intent](./0005-getting-started-with-intent.md#installation-and-setup) and create your first steel thread today.* **Remember**: Great software isn't just about what it does – it's about why it exists. diff --git a/docs/blog/0007-intent-agents-supercharge-claude.md b/docs/blog/0007-intent-agents-supercharge-claude.md new file mode 100644 index 0000000..5148938 --- /dev/null +++ b/docs/blog/0007-intent-agents-supercharge-claude.md @@ -0,0 +1,344 @@ +--- +title: "Intent Agents: Supercharging Claude Code Collaboration" +date: "2025-07-27" +author: "Matthew Sinclair" +draft: false +word_count: 1847 +--- + +# Intent Agents: Supercharging Claude Code Collaboration + +We've built a system that [captures intention](./0003-intent-capture-in-software-development.md) and enables [powerful LLM collaboration](./0004-llm-collaboration-with-intent.md). Now, Intent v2.1.0 takes this to the next level with Claude Code sub-agent integration and proper initialization. 
+ +## The Context Problem + +Every time you start a new Claude session, you face the same ritual: + +``` +You: "Help me add a caching layer" +Claude: "I'll help you add caching. What's your project structure?" +You: "I use Intent with steel threads for organization..." +Claude: "Can you explain what steel threads are?" +You: "They're self-contained units of work..." +[5 minutes of explanation later] +Claude: "OK, so should I create a new file for the cache?" +You: "No, first run 'intent st new'..." +``` + +It's like having a brilliant colleague with amnesia. Every. Single. Session. + +## Enter Intent Agents + +With Intent v2.1.0, two simple commands change everything: + +```bash +$ intent agents init # Initialize agent configuration +$ intent agents install intent # Install the Intent agent +``` + +Now Claude instantly knows: +- What steel threads are and how to use them +- All Intent commands and their purposes +- Your project organization patterns +- Backlog.md integration +- Best practices for Intent development + +## The Transformation + +### Before Intent Agent + +``` +You: "I need to add user authentication" +Claude: "I'll help you create an authentication system. What framework are you using?" +You: "The project uses Intent for management..." +Claude: "What's Intent? How should I structure the authentication code?" +[Several minutes of back-and-forth] +Claude: "Should I create a new module in src/auth?" +You: "No, we need to create a steel thread first..." +[More explanation needed] +``` + +### With Intent Agent + +``` +You: "I need to add user authentication" +Claude: "I'll help you create a steel thread for authentication: + + intent st new 'User Authentication System' + +This creates ST0042. Now let's document the intent. Based on Intent best practices: + +1. What authentication method? (JWT, sessions, OAuth?) +2. What are your security requirements? +3. Do you need multi-device support? 
+ +Once we clarify the intent, I'll help you: +- Break this into Backlog tasks +- Design the implementation approach +- Maintain proper Intent documentation throughout + +Would you like me to also check for related steel threads first?" +``` + +The conversation starts at an architectural level, not a remedial one. + +## How Intent Agents Work + +### Architecture + +Intent agents are Claude Code sub-agents - specialized AI assistants with focused knowledge: + +``` +~/.claude/agents/ +├── intent.md # Intent methodology expert +└── elixir.md # Elixir patterns expert (bonus!) +``` + +### The Intent Agent + +When installed, the Intent agent provides Claude with: + +```markdown +--- +name: intent +description: Intent-aware development assistant +tools: Bash, Read, Write, Edit, Grep +--- + +You are an Intent-aware development assistant specialized in: +- Steel thread methodology +- Intent project management +- Backlog task integration +- Structured documentation + +## Key Commands +- intent st new "Title" - Create steel thread +- intent bl task new ST#### "Task" - Create linked task +- intent agents status - Check agent health +[... comprehensive Intent knowledge ...] +``` + +### Installation and Management + +```bash +# See what's available +$ intent agents list +Available Agents: + intent - Intent-aware development assistant [NOT INSTALLED] + elixir - Elixir code doctor with Usage Rules [NOT INSTALLED] + +# Install the Intent agent +$ intent agents install intent +Installing agent: intent +Installed successfully + +# Install all available agents +$ intent agents install --all + +# Check agent health +$ intent agents status +Checking agent status... +intent [OK] +elixir [OK] + +# Keep agents updated +$ intent agents sync +Syncing installed agents... 
+intent [UP TO DATE] +``` + +## Real-World Impact + +### Scenario 1: New Feature Development + +**Without agent:** +- 10 minutes explaining Intent +- 5 minutes clarifying commands +- Back-and-forth on structure +- Finally start actual work + +**With agent:** +- 0 minutes on Intent explanation +- Immediate strategic discussion +- Claude suggests proper structure +- Straight to implementation + +### Scenario 2: Code Review + +**Without agent:** +``` +You: "Review this steel thread implementation" +Claude: "What's a steel thread? What should I look for?" +``` + +**With agent:** +``` +You: "Review this steel thread implementation" +Claude: "I'll review ST0043 against Intent best practices: +- ✓ Clear objective documented +- ✓ Context provides business rationale +- ⚠ Implementation notes could use more detail +- ✓ Tasks properly linked to Backlog +- Suggestion: Add decision rationale for choosing PostgreSQL" +``` + +### Scenario 3: Onboarding + +New developer joins your Intent-based project: + +```bash +# They install Intent and the agent +$ intent agents install intent + +# Now their Claude understands your entire methodology +# No senior dev time needed for Intent training +# They're productive immediately +``` + +## Custom Project Agents + +Beyond the built-in Intent agent, create project-specific agents: + +```bash +$ mkdir -p intent/agents/myproject +$ cat > intent/agents/myproject/agent.md << 'EOF' +--- +name: myproject +description: Project-specific conventions +tools: Bash, Read, Write +--- + +You understand our project-specific conventions: + +## API Standards +- All endpoints: /api/v2/{resource} +- Authentication: Bearer tokens +- Responses: JSend format +- Errors: RFC 7807 Problem Details + +## Testing Requirements +- Minimum 80% coverage +- Integration tests for all endpoints +- Load tests for critical paths + +## Git Workflow +- Feature branches: feature/JIRA-123-description +- Commits: conventional commits format +- PRs: require 2 approvals +EOF + +$ 
intent agents install myproject +``` + +Now Claude knows YOUR specific rules too. + +## The Compound Effect + +Intent agents don't just save time – they fundamentally change what's possible: + +### 1. Higher-Level Conversations +Skip the basics, discuss architecture and design decisions immediately. + +### 2. Consistent Practices +Every Claude session follows the same patterns, maintaining code quality. + +### 3. Knowledge Preservation +Team conventions are encoded, not just documented. + +### 4. Accelerated Onboarding +New team members get AI that already knows your ways. + +### 5. Evolution Support +As Intent evolves, update the agent – all future sessions improve. + +## Implementation Details + +### Manifest-Based Tracking + +Intent tracks installed agents with checksums: + +```json +{ + "installed": [{ + "name": "intent", + "source": "global", + "checksum": "sha256:abc123...", + "installed_at": "2025-07-27T10:00:00Z" + }] +} +``` + +### Modification Protection + +```bash +$ intent agents status +Checking agent status... +intent [MODIFIED] - Local changes detected + +$ intent agents sync +Warning: Agent has been modified locally +Overwrite local changes? [y/N] +``` + +### Integration Points + +- **intent init**: Detects Claude, offers agent installation +- **intent doctor**: Checks agent health +- **intent upgrade**: Preserves agents during migration + +## Getting Started + +### For New Projects + +```bash +$ intent init "My Project" +Claude Code detected! +Would you like to install the Intent sub-agent? [Y/n] y +Intent agent installed successfully! +``` + +### For Existing Projects + +```bash +$ intent agents install intent +$ intent agents status +``` + +### Daily Workflow + +1. Agents are installed once, work forever +2. Claude automatically loads agent knowledge +3. You focus on building, not explaining + +## The Future + +This is just the beginning. 
Imagine: + +- **Language-specific agents**: Python, Go, Rust experts +- **Framework agents**: React, Django, Rails specialists +- **Domain agents**: Financial, healthcare, gaming knowledge +- **Team agents**: Your specific architectural decisions + +Each agent layers additional expertise while maintaining Intent's structured approach. + +## Start Today + +If you're using Intent and Claude Code: + +```bash +$ intent agents install intent +``` + +That's it. Your next Claude session will be transformed. + +If you're not using Intent yet, [get started here](./0005-getting-started-with-intent.md). + +## Conclusion + +Intent agents solve the context problem permanently. No more explaining your methodology every session. No more reminding Claude about your commands. No more inconsistent suggestions. + +Just intelligent, context-aware assistance that understands your project as well as you do. + +The future of development isn't just AI-assisted – it's AI that truly understands your intent. + +[Back to Intent Blog Index](./README.md) \ No newline at end of file diff --git a/docs/blog/README.md b/docs/blog/README.md new file mode 100644 index 0000000..25a3e23 --- /dev/null +++ b/docs/blog/README.md @@ -0,0 +1,44 @@ +# Intent Blog Series + +A comprehensive blog series explaining the Intent methodology, from philosophy to practical implementation. + +## Posts + +1. **[The Motivation for Intent: Why Intention Matters](./0000-motivation-for-intent.md)** + Explores the fundamental challenge of maintaining developer intention in modern software development, especially when working with LLMs. + +2. **[Introduction to Intent](./0001-introduction-to-intent.md)** + Introduces Intent as a practical solution for intention-aware development, explaining its core components and principles. + +3. **[The Steel Thread Methodology](./0002-the-steel-thread-methodology.md)** + Deep dive into steel threads - self-contained units of work that capture clear intention and deliver tangible value. 
+ +4. **[Intent Capture in Software Development](./0003-intent-capture-in-software-development.md)** + Practical techniques for capturing, preserving, and leveraging intention throughout the development process. + +5. **[LLM Collaboration with Intent](./0004-llm-collaboration-with-intent.md)** + How Intent's structure makes AI assistance more effective, transforming LLMs from code generators into development partners. + +6. **[Getting Started with Intent](./0005-getting-started-with-intent.md)** + Practical implementation guide with installation instructions, daily workflow, and real examples from creating this blog series. + +7. **[Next Steps and Future Work](./0006-next-steps-and-future-work.md)** + The roadmap for Intent's evolution, integration opportunities, and vision for the future of intention-aware development. + +8. **[Intent Agents: Supercharging Claude Code Collaboration](./0007-intent-agents-supercharge-claude.md)** + How Intent v2.1.0's agent system transforms AI pair programming by giving Claude permanent understanding of your methodology. + +## About This Series + +This blog series was created using Intent itself (ST0013), demonstrating the methodology in action. The posts progress from conceptual understanding to practical implementation, suitable for developers, team leads, and anyone interested in improving their development process. + +## Key Concepts + +- **Steel Threads**: Complete, minimal paths through your system that capture intention +- **Intention-First Development**: Starting with "why" before "what" +- **LLM Collaboration**: Using structured documentation to enhance AI assistance +- **Two-Tier Tracking**: Strategic intent at thread level, tactical execution at task level + +## Getting Started + +Ready to implement Intent in your projects? Start with [Getting Started with Intent](./0005-getting-started-with-intent.md) for practical instructions. 
diff --git a/RELEASE_NOTES_v1.2.1.md b/docs/releases/1.2.1/RELEASE_NOTES.md similarity index 100% rename from RELEASE_NOTES_v1.2.1.md rename to docs/releases/1.2.1/RELEASE_NOTES.md diff --git a/docs/releases/2.0.0/RELEASE_NOTES.md b/docs/releases/2.0.0/RELEASE_NOTES.md new file mode 100644 index 0000000..308f26d --- /dev/null +++ b/docs/releases/2.0.0/RELEASE_NOTES.md @@ -0,0 +1,234 @@ +# Intent v2.0.0 Release Notes + +## Release Date: July 17, 2025 (Updated: July 27, 2025) + +## Overview + +Intent v2.0.0 marks a major milestone in the evolution of the Steel Thread Process tooling. This release represents a complete rebrand from STP to Intent, reflecting the tool's core mission of capturing and preserving the intention behind software development decisions. + +## Major Changes + +### 🤖 Claude Code Sub-Agent Integration + +Intent v2.0.0 includes full integration with Claude Code's sub-agent system, revolutionizing AI-assisted development: + +- **Intent Agent**: Pre-built agent that understands steel threads, Intent commands, and project structure +- **Elixir Agent**: Specialized agent for Elixir development with Usage Rules and Ash/Phoenix patterns +- **Agent Management**: Complete suite of commands for installing, syncing, and managing agents +- **Seamless Integration**: Agents work automatically with Claude Code sessions + +### 🚀 Complete Rebrand: STP → Intent + +The project has been renamed from "STP" (Steel Thread Process) to "Intent" to better communicate its purpose. While the Steel Thread methodology remains unchanged, the tooling now has a name that immediately conveys its value proposition. + +### 📁 Simplified Directory Structure + +The project structure has been flattened and simplified: + +**Before (STP v1.x):** +``` +stp/ +├── bin/ # Executables mixed with project +├── prj/ # Nested project directory +│ ├── st/ # Steel threads +│ └── wip.md # Work in progress +├── eng/ # Engineering docs +└── usr/ # User docs +``` + +**After (Intent v2.0.0):** +``` +. 
+├── agents/ # Claude Code sub-agents (global) +│ ├── intent/ # Intent methodology agent +│ └── elixir/ # Elixir code doctor agent +├── bin/ # Tool executables (top-level) +├── intent/ # Project artifacts (flattened) +│ ├── agents/ # Project-specific sub-agents +│ ├── st/ # Steel threads +│ ├── eng/ # Engineering docs +│ ├── usr/ # User docs +│ └── wip.md # Work in progress +└── tests/ # Test suite +``` + +### 🔧 New Commands + +- **`intent bootstrap`**: One-command global setup with clear instructions +- **`intent doctor`**: Comprehensive diagnostics to troubleshoot issues +- **`intent agents`**: Complete agent management system: + - `intent agents list` - Show available and installed agents + - `intent agents install` - Install agents to Claude configuration + - `intent agents sync` - Update agents while preserving modifications + - `intent agents uninstall` - Remove Intent-managed agents + - `intent agents show` - Display agent details and metadata + - `intent agents status` - Check agent health and integrity + +### 📋 JSON Configuration + +Configuration has moved from YAML to JSON format with proper local/global hierarchy: +- Local: `.intent/config.json` +- Global: `~/.config/intent/config.json` (follows XDG standard) + +### ✅ Full Backwards Compatibility + +- `stp` command symlinked to `intent` +- Automatic migration via `intent upgrade` +- All existing projects continue to work + +## Installation & Migration + +### New Installation + +```bash +# Clone the repository +git clone https://github.com/matthewsinclair/intent.git +cd intent + +# Add to PATH +export PATH="$PATH:$(pwd)/bin" + +# Bootstrap global configuration +intent bootstrap + +# Verify installation +intent doctor +``` + +### Migration from STP v1.x + +```bash +# From your existing STP project +intent upgrade + +# The upgrade will: +# 1. Detect your current version +# 2. Create backups +# 3. Migrate directory structure +# 4. Update configuration format +# 5. 
Preserve all content +``` + +## Breaking Changes + +While we maintain backwards compatibility, these changes affect the underlying structure: + +1. **Directory paths have changed**: + - `stp/prj/st/` → `intent/st/` + - `stp/prj/wip.md` → `intent/wip.md` + - Configuration in `.intent/` not `.stp/` + +2. **Configuration format**: + - YAML → JSON + - New global config location + +3. **Repository location**: + - GitHub: `matthewsinclair/stp` → `matthewsinclair/intent` + +## New Features + +### Claude Code Agent Integration + +- **Pre-built Agents**: Intent and Elixir agents ready to use +- **Agent Management**: Full lifecycle commands (install, sync, uninstall) +- **Project & Global Agents**: Support for both scopes +- **Integrity Checking**: Checksums track modifications +- **Seamless Claude Integration**: Agents automatically enhance Claude sessions + +**Quick Start with Agents:** +```bash +# Install the Intent agent +intent agents install intent + +# Check agent status +intent agents status + +# Now Claude understands Intent methodology! 
+``` + +### Enhanced User Experience + +- **Better error messages**: Clear, actionable feedback +- **Improved help system**: Context-aware help +- **Streamlined commands**: Consistent interface +- **Progress indicators**: Visual feedback during operations + +### Developer Experience + +- **Comprehensive test suite**: Full coverage with BATS +- **GitHub Actions CI/CD**: Automated testing +- **Example projects**: Migration demonstrations +- **Enhanced documentation**: Updated for Intent + +### Technical Improvements + +- **Robust migration**: Fail-forward approach +- **Better path handling**: Works in more environments +- **Dependency management**: Clear requirements (jq, backlog.md) +- **Configuration validation**: Catches errors early +- **Agent manifest tracking**: JSON-based with checksums +- **50 new tests**: Complete agent system test coverage + +## Fixed Issues + +- GitHub Actions workflows now properly exclude library tests +- Symlink handling improved for cross-platform compatibility +- Test suite reliability enhanced +- Configuration loading hierarchy properly implemented +- Path resolution works correctly in all scenarios + +## Known Issues + +None at this time. All tests passing on Ubuntu and macOS. + +## Upgrading + +### From v1.2.1 to v2.0.0 + +1. **Backup your project** (automatic, but always good practice) +2. Run `intent upgrade` from your project root +3. Review the migration summary +4. Update any custom scripts to use `intent` instead of `stp` +5. 
Update your PATH if you had hardcoded the old location + +### Command Equivalents + +All commands remain the same, just replace `stp` with `intent`: + +| Old Command | New Command | +|-------------|-------------| +| `stp st new` | `intent st new` | +| `stp bl list` | `intent bl list` | +| `stp task create` | `intent task create` | +| `stp help` | `intent help` | + +## Future Roadmap + +With the rebrand complete and Claude Code integration shipped, Intent is positioned for: + +- **Q3 2025**: Extended agent ecosystem (language-specific agents) +- **Q4 2025**: Team collaboration features +- **2026**: Enterprise scalability and custom agent marketplaces + +## Support + +- **Documentation**: Updated user and reference guides +- **Issues**: Report bugs at https://github.com/matthewsinclair/intent/issues +- **Help**: Run `intent help` or `intent doctor` + +## Contributors + +- Matthew Sinclair - Project creator and maintainer +- Claude (Anthropic) - Development assistance + +## Thank You + +Thank you to all early adopters of STP. Your feedback shaped Intent into what it is today. The Steel Thread Process remains at the heart of Intent, now with tooling that better reflects its purpose. + +--- + +**Start capturing intention today with Intent v2.0.0!** + +```bash +intent st new "My first intentional development" +``` \ No newline at end of file diff --git a/docs/releases/2.1.0/RELEASE_NOTES.md b/docs/releases/2.1.0/RELEASE_NOTES.md new file mode 100644 index 0000000..58dd82a --- /dev/null +++ b/docs/releases/2.1.0/RELEASE_NOTES.md @@ -0,0 +1,99 @@ +# Intent v2.1.0 Release Notes + +## Release Date: July 27, 2025 + +## Overview + +Intent v2.1.0 is a maintenance release that enhances the Agent system introduced in v2.0.0 and fixes critical bugs in the upgrade process. 
+ +## What's Changed + +### 🐛 Critical Bug Fixes + +#### Agent Directory Structure + +Fixed a critical bug where the upgrade process was creating agent directories in the wrong location: + +- **Problem**: `intent upgrade` was creating `./agents/` at the project root +- **Solution**: Agent directories are now correctly placed in `./intent/agents/` +- **Impact**: Projects upgraded with the buggy version will have an incorrect `agents/` directory at root + +**To fix affected projects:** + +```bash +# Remove the incorrectly placed directory +rm -rf ./agents + +# The correct location is: +# ./intent/agents/ (for project-specific agents) +``` + +### 🔧 Agent System Improvements + +- **`intent agents init`**: Now required before installing agents (introduced in v2.0.0) +- **Project agent initialization**: Fixed to create directories in the correct location +- **Upgrade process**: No longer incorrectly preserves root-level agent directories + +### 📋 Updated Components + +- `bin/intent_helpers`: Fixed `migrate_v2_0_to_v2_1()` function +- `bin/intent_agents`: Fixed project agent initialization path +- `bin/intent_upgrade`: Removed incorrect agent directory preservation + +## Agent Directory Structure (Clarified) + +The correct agent directory structure is: + +``` +$INTENT_HOME/agents/ # Global agents shipped with Intent +./intent/agents/ # Project-specific custom agents +~/.claude/agents/ # Where Claude Code reads installed agents +~/.intent/agents/ # Intent's tracking of installed agents +``` + +## Installation & Upgrade + +### Upgrading from v2.0.0 + +```bash +# Update Intent installation +cd /path/to/intent +git pull + +# Upgrade your project +cd /path/to/your/project +intent upgrade +``` + +### For Projects with Incorrect Agent Directories + +If you previously ran `intent upgrade` and have an `agents/` directory at your project root: + +```bash +# Check if you have the incorrect structure +ls -la ./agents + +# If it exists, remove it (the correct location is 
./intent/agents/) +rm -rf ./agents +``` + +## Testing + +All 165 tests pass, including: + +- Agent directory creation tests +- Upgrade process tests +- Agent initialization tests + +## Known Issues + +None at this time. + +## Contributors + +- Matthew Sinclair (@matts) + +## Support + +- **Issues**: <https://github.com/matthewsinclair/intent/issues> +- **Help**: Run `intent help agents` for agent-specific help diff --git a/docs/releases/2.2.0/RELEASE_NOTES.md b/docs/releases/2.2.0/RELEASE_NOTES.md new file mode 100644 index 0000000..464b6cd --- /dev/null +++ b/docs/releases/2.2.0/RELEASE_NOTES.md @@ -0,0 +1,102 @@ +# Intent v2.2.0 Release Notes + +## Overview + +Intent v2.2.0 introduces the `fileindex` command, a powerful file indexing and tracking system that enables systematic progress tracking through large codebases. This release also enhances the Elixir agent with systematic code review capabilities. + +## New Features + +### 1. Fileindex Command + +The `intent fileindex` command provides a persistent checkbox-based file tracking system: + +- **File Discovery**: Automatically find and index files based on patterns +- **Progress Tracking**: Mark files as checked/unchecked with persistent state +- **Flexible Integration**: Works both within Intent projects and standalone +- **Toggle Functionality**: Quick marking/unmarking of files with `-X` flag +- **Explicit State Control**: New `-C` (check) and `-U` (uncheck) flags for setting specific states + +#### Key Commands: +```bash +# Create an index of all Elixir files recursively +intent fileindex -r -i project.index + +# Check a specific file (mark as completed) +intent fileindex -i project.index -C lib/my_app/user.ex + +# Uncheck a specific file (mark as pending) +intent fileindex -i project.index -U lib/my_app/user.ex + +# Toggle a file's checked state +intent fileindex -i project.index -X lib/my_app/router.ex + +# View current index with verbose output +intent fileindex -v -i project.index +``` + +### 2. 
Enhanced Elixir Agent + +The Elixir agent now supports systematic code review workflows using the fileindex command: + +- **Module-based Reviews**: Review entire Elixir modules systematically +- **Path Flexibility**: Accept both module names (e.g., `MyApp.Users`) and filesystem paths +- **Progress Tracking**: Automatically track which files have been reviewed +- **Smart Path Mapping**: Intelligent conversion between Elixir module names and file paths + +#### Example Usage: +``` +Review the MyApp.Users module systematically, checking each file as you complete it +``` + +## Improvements + +### Upgrade Command +- Updated to support migrations to v2.2.0 +- Improved version detection and upgrade path handling +- Better support for incremental upgrades (2.0.0 → 2.1.0 → 2.2.0) + +### Bash Compatibility +- Fixed macOS compatibility issues with associative arrays +- Replaced `readarray` with portable alternatives +- Improved shell script portability + +## Installation + +### New Installation +```bash +curl -sSL https://intent.dev/install.sh | bash +``` + +### Upgrade from Previous Versions +```bash +intent upgrade +``` + +## Migration Notes + +Projects upgrading from v2.1.0 will have their configuration automatically updated to v2.2.0. The fileindex command will be immediately available after upgrade. + +## Breaking Changes + +None. This release is fully backward compatible with v2.1.0. + +## Bug Fixes + +- Fixed bash compatibility issues for macOS users +- Improved error handling in test framework +- Better handling of edge cases in file operations + +## Credits + +This release includes contributions from the Intent community. Special thanks to all testers and users who provided feedback on the fileindex functionality. + +## Next Steps + +After upgrading to v2.2.0: + +1. Run `intent help fileindex` to learn about the new command +2. Try `intent fileindex --demo` to see it in action +3. Use `intent agents list` to see the updated Elixir agent capabilities +4. 
Run `intent doctor` to verify your installation + +For questions or issues, please visit: https://github.com/matthewsinclair/intent/issues \ No newline at end of file diff --git a/docs/releases/2.2.1/RELEASE_NOTES.md b/docs/releases/2.2.1/RELEASE_NOTES.md new file mode 100644 index 0000000..8a83767 --- /dev/null +++ b/docs/releases/2.2.1/RELEASE_NOTES.md @@ -0,0 +1,114 @@ +# Intent v2.2.1 Release Notes + +## Overview + +Intent v2.2.1 is a maintenance release that significantly improves tool dependency management and error handling. This release addresses silent failures when required tools are missing and provides comprehensive guidance for users to resolve dependency issues. + +## Key Improvements + +### 🔧 Centralized Version Management + +- **Single Source of Truth**: Version is now managed through a `VERSION` file at the project root +- **Consistent Updates**: All scripts dynamically read from the VERSION file, eliminating version inconsistencies +- **Easier Maintenance**: Future version bumps only require updating one file + +### 🛠️ Comprehensive Tool Dependency Checking + +The `intent doctor` command now provides detailed dependency analysis: + +- **Categorized Tools**: Dependencies are organized into required, core, and optional categories +- **Platform-Specific Instructions**: Installation commands tailored for macOS, Linux distributions, and other systems +- **Clear Severity Levels**: Distinguish between critical errors, warnings, and informational messages + +### 🚫 No More Silent Failures + +- **jq Dependency Handling**: All commands that require jq now fail gracefully with clear error messages +- **Agent Operations**: Fixed silent failures during agent installation, sync, and management when jq is missing +- **Actionable Error Messages**: Every error now includes specific steps to resolve the issue + +## What's New + +### Added +- `VERSION` file for centralized version management +- `get_intent_version()` function in intent_helpers +- Comprehensive tool
dependency checking in `intent doctor` +- Platform-specific installation instructions for all tools +- Better error handling for missing dependencies + +### Changed +- Tool dependencies categorized as required, core, and optional +- Enhanced error messages with installation instructions +- All scripts now use centralized version management + +### Fixed +- Silent failures when jq is missing during agent operations +- Missing error messages for required tool dependencies +- Inadequate installation guidance for different platforms +- Version number inconsistencies across scripts +- **Upgrade Path**: Added missing upgrade path from v2.2.0 to v2.2.1 + +## Installation & Upgrade + +### For New Users +```bash +git clone https://github.com/matthewsinclair/intent.git +cd intent +export PATH="$PATH:$(pwd)/bin" +intent bootstrap +``` + +### For Existing Users +```bash +cd /path/to/intent +git pull origin main +intent doctor # Check for any missing dependencies +``` + +## Tool Requirements + +### Required Tools +- **bash**: Shell interpreter +- **sed**: Text processing +- **grep**: Pattern matching +- **mkdir**: Directory creation +- **jq**: JSON processing (critical for configs and agents) + +### Optional Tools +- **backlog**: Task management system +- **bats**: Test framework +- **sha256sum/shasum**: Checksum verification for agents + +Run `intent doctor` to check your environment and get installation instructions for any missing tools. + +## Testing + +After upgrading, verify your installation: + +```bash +# Check version +intent --version # Should show 2.2.1 + +# Run diagnostics +intent doctor + +# Test in verbose mode +intent doctor --verbose +``` + +## Migration Notes + +No breaking changes in this release. The version management improvements are backward compatible. 
+ +## Support + +For issues or questions: +- GitHub Issues: https://github.com/matthewsinclair/intent/issues +- Documentation: Run `intent help` for command documentation + +## Contributors + +This release was developed by Matthew Sinclair with automated assistance. + +--- + +*Intent v2.2.1 - Structured Development Process with Improved Reliability* \ No newline at end of file diff --git a/docs/releases/2.3.2/RELEASE_NOTES.md b/docs/releases/2.3.2/RELEASE_NOTES.md new file mode 100644 index 0000000..0c1fe0b --- /dev/null +++ b/docs/releases/2.3.2/RELEASE_NOTES.md @@ -0,0 +1,200 @@ +# Intent v2.3.2 Release Notes + +## Overview + +Intent v2.3.2 enhances the Elixir subagent with comprehensive antipattern detection capabilities. This release helps Elixir developers write cleaner, more maintainable code by automatically detecting and providing remediation for 24 common antipatterns sourced from the official Elixir documentation. + +## Key Features + +### 🔍 Comprehensive Antipattern Detection + +The Elixir Doctor now detects and helps remediate 24 common Elixir antipatterns across four categories: + +#### Code-related Antipatterns (9 patterns) + +- Comments overuse +- Complex `else` clauses in `with` +- Complex extractions in clauses +- Dynamic atom creation +- Long parameter list +- Namespace trespassing +- Non-assertive map access +- Non-assertive pattern matching +- Non-assertive truthiness + +#### Design-related Antipatterns (6 patterns) + +- Alternative return types +- Boolean obsession +- Exceptions for control-flow +- Primitive obsession +- Unrelated multi-clause function +- Using application configuration for libraries + +#### Process-related Antipatterns (4 patterns) + +- Code organisation by process +- Scattered process interfaces +- Sending unnecessary data +- Unsupervised processes + +#### Meta-programming Antipatterns (5 patterns) + +- Compile-time dependencies +- Large code generation +- Unnecessary macros +- `use` instead of `import` +- Untracked 
compile-time dependencies + +### 📊 Enhanced Code Review Process + +- **Integrated Detection**: Antipattern checking is now part of the systematic code review workflow +- **Detailed Reports**: Clear reporting with line numbers and specific remediation suggestions +- **Prioritized Fixes**: Antipatterns are categorized and prioritized by impact +- **Prevention Principles**: Key principles to help avoid antipatterns in future code + +## What's New + +### Added + +- Comprehensive antipattern detection section in Elixir subagent +- Full antipattern documentation at `intent/plugins/claude/subagents/elixir/antipatterns.md` +- Antipattern review workflow with systematic approach +- Example usage commands and report formats +- Key principles for antipattern prevention +- Migration function for v2.3.1 to v2.3.2 + +### Changed + +- Enhanced Elixir subagent with antipattern detection capabilities +- Updated systematic review template to include antipattern analysis +- Elixir Doctor now automatically checks for antipatterns during code reviews + +### Technical Improvements + +- Better code quality guidance through antipattern detection +- More comprehensive code review process +- Proactive detection of common Elixir mistakes + +## Installation & Upgrade + +### For New Users + +```bash +git clone https://github.com/matthewsinclair/intent.git +cd intent +export PATH="$PATH:$(pwd)/bin" +intent bootstrap +``` + +### For Existing Users + +```bash +cd /path/to/intent +git pull origin main +intent upgrade # Automatically handles 2.3.1 to 2.3.2 migration +intent claude subagents sync # Update installed agents +``` + +## Using Antipattern Detection + +### Check a Single File + +```bash +# Ask the Elixir agent to check for antipatterns +"Check lib/my_app/user.ex for antipatterns" +``` + +### Review an Entire Module + +```bash +# Review a module for antipatterns +"Review MyApp.Accounts for common antipatterns" +``` + +### Focus on Specific Categories + +```bash +# Check for specific 
antipattern types +"Check for process-related antipatterns in lib/my_app/" +``` + +### Combined with Full Review + +```bash +# Complete Elixir Doctor review including antipatterns +"Apply Elixir Doctor and check for antipatterns in MyApp.Users" +``` + +## Example Antipattern Report + +The Elixir Doctor provides detailed reports: + +``` +## Antipattern Analysis + +Found 4 antipatterns in MyApp.Users: + +### Code Antipatterns (2) +1. **Non-assertive map access** (line 45) + - Using `user[:email]` when email is required + - Remediation: Use `user.email` for required fields + +2. **Long parameter list** (line 78) + - Function has 7 parameters + - Remediation: Group related params into maps/structs + +### Design Antipatterns (1) +1. **Boolean obsession** (line 123) + - Using `admin: true, editor: true` options + - Remediation: Use `:role` atom instead + +### Process Antipatterns (1) +1. **Scattered process interfaces** (lines 200-250) + - Direct GenServer.call/2 usage in multiple places + - Remediation: Centralize in single interface module +``` + +## Testing + +After upgrading, verify the new functionality: + +```bash +# Check version +intent --version # Should show 2.3.2 + +# Update agents +intent claude subagents sync + +# Verify elixir agent is updated +intent claude subagents show elixir | grep antipattern +``` + +## Migration Notes + +This release includes a smooth migration path from v2.3.1: + +- Version configuration is automatically updated +- Existing Elixir subagent installations are updated via `intent claude subagents sync` +- No breaking changes or manual intervention required + +## Documentation + +- **Full Antipattern Reference**: `intent/plugins/claude/subagents/elixir/antipatterns.md` +- **Elixir Agent Documentation**: Run `intent claude subagents show elixir` +- **Intent Help**: Run `intent help` for general command documentation + +## Support + +For issues or questions: + +- GitHub Issues: <https://github.com/matthewsinclair/intent/issues> +- 
Documentation: Run `intent help` for command documentation + +## Contributors + +This release was developed by Matthew Sinclair with the antipatterns documentation sourced from the official Elixir documentation at hexdocs.pm. + +--- + +*Intent v2.3.2 - Structured Development Process with Enhanced Elixir Code Quality* diff --git a/examples/hello-world/.intent/config.json b/examples/hello-world/.intent/config.json new file mode 100644 index 0000000..c337970 --- /dev/null +++ b/examples/hello-world/.intent/config.json @@ -0,0 +1,7 @@ +{ + "intent_version": "2.1.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "Intent User", + "editor": "vim" +} \ No newline at end of file diff --git a/examples/hello-world/README.md b/examples/hello-world/README.md new file mode 100644 index 0000000..0fd2f6c --- /dev/null +++ b/examples/hello-world/README.md @@ -0,0 +1,61 @@ +# Hello World - Intent v2.1.0 Example + +This is an example project demonstrating the Intent v2.1.0 structure. + +## Project Structure + +``` +hello-world/ +├── .intent/ +│ └── config.json # JSON configuration (new in v2.0.0) +├── intent/ # Flattened structure (was stp/) +│ ├── st/ # Steel threads (was stp/prj/st/) +│ │ ├── ST0001/ +│ │ └── ST0002/ +│ ├── eng/ # Engineering docs +│ ├── ref/ # Reference docs (was usr/) +│ ├── llm/ # LLM context +│ └── _archive/ # Archived content +└── backlog/ # Task management +``` + +## Key Differences from v1.x + +1. **JSON Config**: Uses `.intent/config.json` instead of YAML +2. **Flattened Paths**: `intent/st/` instead of `stp/prj/st/` +3. **Tool Separation**: Executables in `bin/`, templates in `lib/` +4. 
**Renamed Directories**: `usr/` → `ref/` + +## Configuration + +The `.intent/config.json` file: +```json +{ + "intent_version": "2.1.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "Intent User", + "editor": "vim" +} +``` + +## Usage + +After installing Intent v2.1.0: + +```bash +# Initialize a new project +intent init + +# Initialize agent configuration (new in v2.1.0) +intent agents init + +# Create a steel thread +intent st new "My Feature" + +# Check status +intent status + +# Run doctor for diagnostics +intent doctor +``` \ No newline at end of file diff --git a/examples/hello-world/intent/st/ST0001/design.md b/examples/hello-world/intent/st/ST0001/design.md new file mode 100644 index 0000000..b505159 --- /dev/null +++ b/examples/hello-world/intent/st/ST0001/design.md @@ -0,0 +1,46 @@ +--- +verblock: "01 Jul 2025:v0.1: Intent User - Initial version" +intent_version: 2.0.0 +--- +# ST0001: Design Document + +## Overview + +Design for the hello-world project demonstrating Intent v2.0.0 structure. + +## Key Design Decisions + +### 1. JSON Configuration +- Moved from YAML to JSON for configuration +- No external dependencies needed (can parse with sed/grep) +- Cleaner, more standard format + +### 2. Flattened Structure +``` +Old: stp/prj/st/ +New: intent/st/ + +Old: stp/eng/ +New: intent/eng/ + +Old: stp/usr/ +New: intent/ref/ +``` + +### 3. Tool Separation +``` +Tool components: +- bin/ (executables) +- lib/ (templates, resources) + +Project artifacts: +- intent/ (steel threads, engineering docs) +- backlog/ (task management) +``` + +## Benefits + +1. **Clarity**: Clear separation between tool and usage +2. **Simplicity**: Flattened structure is easier to navigate +3. **Deployment**: Can deploy just bin/ and lib/ for the tool +4. 
**Flexibility**: Projects can customize intent_dir and backlog_dir \ No newline at end of file diff --git a/examples/hello-world/intent/st/ST0001/info.md b/examples/hello-world/intent/st/ST0001/info.md new file mode 100644 index 0000000..33547c4 --- /dev/null +++ b/examples/hello-world/intent/st/ST0001/info.md @@ -0,0 +1,37 @@ +--- +verblock: "01 Jul 2025:v0.1: Intent User - Initial version" +intent_version: 2.1.0 +status: Completed +created: 20250701 +completed: 20250705 +--- +# ST0001: Hello World Project Setup + +- **Status**: Completed +- **Created**: 2025-07-01 +- **Completed**: 2025-07-05 +- **Author**: Intent User + +## Objective + +Set up the initial hello-world project using Intent v2.0.0 structure. + +## Context + +This example demonstrates the new v2.0.0 structure with: +- JSON configuration instead of YAML +- Flattened directory structure (intent/st/ not stp/prj/st/) +- Tool executables in top-level bin/ +- Templates in top-level lib/templates/ +- Reference docs in intent/ref/ (was usr/) + +## Related Steel Threads + +None - this is the first steel thread. + +## Context for LLM + +This is a clean v2.0.0 project showing the target structure after migration. Key differences: +1. Configuration is JSON-based +2. Directory structure is flattened +3. Clear separation of tool (bin/, lib/) from project artifacts (intent/, backlog/) \ No newline at end of file diff --git a/examples/hello-world/intent/st/ST0002/info.md b/examples/hello-world/intent/st/ST0002/info.md new file mode 100644 index 0000000..5242612 --- /dev/null +++ b/examples/hello-world/intent/st/ST0002/info.md @@ -0,0 +1,25 @@ +--- +verblock: "10 Jul 2025:v0.1: Intent User - Initial version" +intent_version: 2.1.0 +status: In Progress +created: 20250710 +completed: +--- +# ST0002: Implement Core Feature + +- **Status**: In Progress +- **Created**: 2025-07-10 +- **Completed**: +- **Author**: Intent User + +## Objective + +Implement a core feature to demonstrate the Intent workflow. 
+ +## Context + +This steel thread shows how active development works in the v2.0.0 structure. + +## Related Steel Threads + +- ST0001: Hello World Project Setup (established structure) \ No newline at end of file diff --git a/examples/hello-world/intent/st/steel_threads.md b/examples/hello-world/intent/st/steel_threads.md new file mode 100644 index 0000000..c4b6cf8 --- /dev/null +++ b/examples/hello-world/intent/st/steel_threads.md @@ -0,0 +1,28 @@ +--- +verblock: "10 Jul 2025:v0.1: Intent User - Initial version" +intent_version: 2.0.0 +--- +# Steel Threads + +This document serves as an index of all steel threads in the project. + +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Hello World Project Setup | Completed | 2025-07-01 | 2025-07-05 | +| ST0002 | Implement Core Feature | In Progress | 2025-07-10 | | +<!-- END: STEEL_THREAD_INDEX --> + +## Status Definitions + +- **Not Started**: Steel thread has been created but work has not begun +- **In Progress**: Active development is underway +- **Completed**: All objectives have been achieved +- **On Hold**: Work has been paused +- **Cancelled**: Steel thread will not be completed + +## Notes + +This is a v2.0.0 project using the Intent CLI. The steel thread methodology remains the same, but the tool and structure have been modernized. 
\ No newline at end of file diff --git a/examples/v0.0.0-project/.gitignore b/examples/v0.0.0-project/.gitignore new file mode 100644 index 0000000..982692c --- /dev/null +++ b/examples/v0.0.0-project/.gitignore @@ -0,0 +1,18 @@ +# Intent/STP specific +.intent/local.json +*.bak +.backup_* + +# Backlog.md +backlog/.backlog-md/ + +# OS specific +.DS_Store +Thumbs.db + +# Editor specific +*.swp +*.swo +*~ +.vscode/ +.idea/ diff --git a/examples/v0.0.0-project/.intent/config.json b/examples/v0.0.0-project/.intent/config.json new file mode 100644 index 0000000..bbd7b8d --- /dev/null +++ b/examples/v0.0.0-project/.intent/config.json @@ -0,0 +1,7 @@ +{ + "version": "2.0.0", + "project_name": "v0.0.0-project", + "author": "matts", + "created": "2025-07-17", + "st_prefix": "ST" +} \ No newline at end of file diff --git a/examples/v0.0.0-project/backup-20250717-000943/.stp-config b/examples/v0.0.0-project/backup-20250717-000943/.stp-config new file mode 100644 index 0000000..35cf793 --- /dev/null +++ b/examples/v0.0.0-project/backup-20250717-000943/.stp-config @@ -0,0 +1,14 @@ +# STP Configuration File +# This is the ancient v0.0.0 format using YAML-style configuration + +project_name: Ancient Example Project +author: Test User +created: 2023-01-01 + +# Directory structure settings +stp_dir: stp +st_dir: stp/prj/st + +# Tool settings +editor: vim +default_status: Not Started \ No newline at end of file diff --git a/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0001.md b/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0001.md new file mode 100644 index 0000000..24a5965 --- /dev/null +++ b/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0001.md @@ -0,0 +1,26 @@ +# ST0001: Initial Setup + +- **Status**: Completed +- **Created**: 2023-01-15 +- **Completed**: 2023-01-20 +- **Author**: Test User + +## Objective + +Set up the initial project structure and configuration. 
+ +## Context + +This is a test steel thread from the ancient v0.0.0 format. Note that there is no YAML frontmatter - this was added in later versions. + +## Design + +The design uses the old nested structure with stp/prj/st/ for steel threads. + +## Implementation + +Basic implementation completed. + +## Results + +Successfully created initial structure. \ No newline at end of file diff --git a/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0002.md b/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0002.md new file mode 100644 index 0000000..7fb89d4 --- /dev/null +++ b/examples/v0.0.0-project/backup-20250717-000943/stp/prj/st/ST0002.md @@ -0,0 +1,21 @@ +# ST0002: Feature Implementation + +- **Status**: In Progress +- **Created**: 2023-02-01 +- **Completed**: +- **Author**: Test User + +## Objective + +Implement a new feature for testing migration. + +## Context + +This steel thread is in progress in the v0.0.0 format. + +## Tasks + +- [ ] Design the feature +- [ ] Implement core functionality +- [ ] Write tests +- [ ] Document usage \ No newline at end of file diff --git a/examples/v0.0.0-project/intent/st/ST0001/info.md b/examples/v0.0.0-project/intent/st/ST0001/info.md new file mode 100644 index 0000000..24a5965 --- /dev/null +++ b/examples/v0.0.0-project/intent/st/ST0001/info.md @@ -0,0 +1,26 @@ +# ST0001: Initial Setup + +- **Status**: Completed +- **Created**: 2023-01-15 +- **Completed**: 2023-01-20 +- **Author**: Test User + +## Objective + +Set up the initial project structure and configuration. + +## Context + +This is a test steel thread from the ancient v0.0.0 format. Note that there is no YAML frontmatter - this was added in later versions. + +## Design + +The design uses the old nested structure with stp/prj/st/ for steel threads. + +## Implementation + +Basic implementation completed. + +## Results + +Successfully created initial structure. 
\ No newline at end of file diff --git a/examples/v0.0.0-project/intent/st/ST0002/info.md b/examples/v0.0.0-project/intent/st/ST0002/info.md new file mode 100644 index 0000000..7fb89d4 --- /dev/null +++ b/examples/v0.0.0-project/intent/st/ST0002/info.md @@ -0,0 +1,21 @@ +# ST0002: Feature Implementation + +- **Status**: In Progress +- **Created**: 2023-02-01 +- **Completed**: +- **Author**: Test User + +## Objective + +Implement a new feature for testing migration. + +## Context + +This steel thread is in progress in the v0.0.0 format. + +## Tasks + +- [ ] Design the feature +- [ ] Implement core functionality +- [ ] Write tests +- [ ] Document usage \ No newline at end of file diff --git a/examples/v1.2.0-project/.gitignore b/examples/v1.2.0-project/.gitignore new file mode 100644 index 0000000..982692c --- /dev/null +++ b/examples/v1.2.0-project/.gitignore @@ -0,0 +1,18 @@ +# Intent/STP specific +.intent/local.json +*.bak +.backup_* + +# Backlog.md +backlog/.backlog-md/ + +# OS specific +.DS_Store +Thumbs.db + +# Editor specific +*.swp +*.swo +*~ +.vscode/ +.idea/ diff --git a/examples/v1.2.0-project/.intent/config.json b/examples/v1.2.0-project/.intent/config.json new file mode 100644 index 0000000..e879a01 --- /dev/null +++ b/examples/v1.2.0-project/.intent/config.json @@ -0,0 +1,7 @@ +{ + "version": "2.0.0", + "project_name": "v1.2.0-project", + "author": "matts", + "created": "2025-07-17", + "st_prefix": "ST" +} \ No newline at end of file diff --git a/examples/v1.2.0-project/backup-20250717-001326/stp/.config/version b/examples/v1.2.0-project/backup-20250717-001326/stp/.config/version new file mode 100644 index 0000000..25517f8 --- /dev/null +++ b/examples/v1.2.0-project/backup-20250717-001326/stp/.config/version @@ -0,0 +1 @@ +stp_version: 1.2.0 \ No newline at end of file diff --git a/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0001.md b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0001.md new file mode 100644 index 
0000000..6ccafe6 --- /dev/null +++ b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0001.md @@ -0,0 +1,27 @@ +--- +stp_version: 1.2.0 +status: Completed +created: 20240115 +completed: 20240120 +--- +# ST0001: Project Setup + +- **Status**: Completed +- **Created**: 2024-01-15 +- **Completed**: 2024-01-20 +- **Author**: v1.2.0 User + +## Objective + +Initialize the project with v1.2.0 structure. + +## Context + +This example demonstrates the v1.2.0 format with: +- YAML frontmatter +- File-based steel threads (not directories) +- Version tracking in stp/.config/version + +## Implementation + +Created standard directory structure with configuration. \ No newline at end of file diff --git a/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0002.md b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0002.md new file mode 100644 index 0000000..8f14fe4 --- /dev/null +++ b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/ST0002.md @@ -0,0 +1,27 @@ +--- +stp_version: 1.2.0 +status: In Progress +created: 20240201 +completed: +--- +# ST0002: Feature Development + +- **Status**: In Progress +- **Created**: 2024-02-01 +- **Completed**: +- **Author**: v1.2.0 User + +## Objective + +Develop new feature with proper versioning. + +## Context + +This steel thread shows an in-progress item in v1.2.0 format. + +## Tasks + +- [x] Design approved +- [ ] Implementation +- [ ] Testing +- [ ] Documentation \ No newline at end of file diff --git a/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/steel_threads.md b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/steel_threads.md new file mode 100644 index 0000000..ac696ed --- /dev/null +++ b/examples/v1.2.0-project/backup-20250717-001326/stp/prj/st/steel_threads.md @@ -0,0 +1,23 @@ +--- +stp_version: 1.2.0 +--- +# Steel Threads + +This document serves as an index of all steel threads in the project. 
+ +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Project Setup | Completed | 2024-01-15 | 2024-01-20 | +| ST0002 | Feature Development | In Progress | 2024-02-01 | | +<!-- END: STEEL_THREAD_INDEX --> + +## Status Definitions + +- **Not Started**: Steel thread has been created but work has not begun +- **In Progress**: Active development is underway +- **Completed**: All objectives have been achieved +- **On Hold**: Work has been paused +- **Cancelled**: Steel thread will not be completed \ No newline at end of file diff --git a/examples/v1.2.0-project/intent/st/ST0001/info.md b/examples/v1.2.0-project/intent/st/ST0001/info.md new file mode 100644 index 0000000..9d3b793 --- /dev/null +++ b/examples/v1.2.0-project/intent/st/ST0001/info.md @@ -0,0 +1,23 @@ +--- +intent_version: 2.0.0 +--- +# ST0001: Project Setup + +- **Status**: Completed +- **Created**: 2024-01-15 +- **Completed**: 2024-01-20 +- **Author**: v1.2.0 User + +## Objective + +Initialize the project with v1.2.0 structure. + +## Context + +This example demonstrates the v1.2.0 format with: +- YAML frontmatter +- File-based steel threads (not directories) +- Version tracking in stp/.config/version + +## Implementation + diff --git a/examples/v1.2.0-project/intent/st/ST0002/info.md b/examples/v1.2.0-project/intent/st/ST0002/info.md new file mode 100644 index 0000000..8f14fe4 --- /dev/null +++ b/examples/v1.2.0-project/intent/st/ST0002/info.md @@ -0,0 +1,27 @@ +--- +stp_version: 1.2.0 +status: In Progress +created: 20240201 +completed: +--- +# ST0002: Feature Development + +- **Status**: In Progress +- **Created**: 2024-02-01 +- **Completed**: +- **Author**: v1.2.0 User + +## Objective + +Develop new feature with proper versioning. + +## Context + +This steel thread shows an in-progress item in v1.2.0 format. 
+ +## Tasks + +- [x] Design approved +- [ ] Implementation +- [ ] Testing +- [ ] Documentation \ No newline at end of file diff --git a/examples/v1.2.1-project/.gitignore b/examples/v1.2.1-project/.gitignore new file mode 100644 index 0000000..982692c --- /dev/null +++ b/examples/v1.2.1-project/.gitignore @@ -0,0 +1,18 @@ +# Intent/STP specific +.intent/local.json +*.bak +.backup_* + +# Backlog.md +backlog/.backlog-md/ + +# OS specific +.DS_Store +Thumbs.db + +# Editor specific +*.swp +*.swo +*~ +.vscode/ +.idea/ diff --git a/examples/v1.2.1-project/.intent/config.json b/examples/v1.2.1-project/.intent/config.json new file mode 100644 index 0000000..ffcf8cd --- /dev/null +++ b/examples/v1.2.1-project/.intent/config.json @@ -0,0 +1,7 @@ +{ + "version": "2.0.0", + "project_name": ".", + "author": "matts", + "created": "2025-07-17", + "st_prefix": "ST" +} diff --git a/examples/v1.2.1-project/CLAUDE.md b/examples/v1.2.1-project/CLAUDE.md new file mode 100644 index 0000000..be584aa --- /dev/null +++ b/examples/v1.2.1-project/CLAUDE.md @@ -0,0 +1,38 @@ +# . Project Guidelines + +This is an Intent v2.0.0 project (formerly STP). 
+ +## Project Structure + +- `intent/` - Project artifacts (steel threads, docs, work tracking) + - `st/` - Steel threads organized as directories + - `docs/` - Technical documentation + - `llm/` - LLM-specific guidelines +- `backlog/` - Task management (if using Backlog.md) +- `.intent/` - Configuration and metadata + +## Steel Threads + +Steel threads are organized as directories under `intent/st/`: +- Each steel thread has its own directory (e.g., ST0001/) +- Minimum required file is `info.md` with metadata +- Optional files: design.md, impl.md, tasks.md, results.md + +## Commands + +- `intent st new "Title"` - Create a new steel thread +- `intent st list` - List all steel threads +- `intent st show <id>` - Show steel thread details +- `intent doctor` - Check configuration +- `intent help` - Get help + +## Migration Notes + +This project was migrated from STP to Intent v2.0.0 on 2025-07-17. +- Old structure: `stp/prj/st/`, `stp/eng/`, etc. +- New structure: `intent/st/`, `intent/docs/`, etc. +- Configuration moved from YAML to JSON format + +## Author + +matts diff --git a/stp/.config/version b/examples/v1.2.1-project/backup-20250717-001454/stp/.config/version similarity index 100% rename from stp/.config/version rename to examples/v1.2.1-project/backup-20250717-001454/stp/.config/version diff --git a/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/design.md b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/design.md new file mode 100644 index 0000000..41850cd --- /dev/null +++ b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/design.md @@ -0,0 +1,22 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# ST0001: Design Document + +## Overview + +This steel thread introduces the directory-based structure for organizing steel thread content. + +## Design Decisions + +1. Each steel thread gets its own directory +2. 
Standard files include: info.md, design.md, impl.md, tasks.md, results.md +3. Allows for additional files as needed + +## Benefits + +- Better organization +- Easier to find related content +- Supports more complex steel threads +- Enables file-specific version tracking \ No newline at end of file diff --git a/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/info.md b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/info.md new file mode 100644 index 0000000..4791567 --- /dev/null +++ b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0001/info.md @@ -0,0 +1,34 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: Completed +created: 20250115 +completed: 20250120 +--- +# ST0001: Directory-Based Structure + +- **Status**: Completed +- **Created**: 2025-01-15 +- **Completed**: 2025-01-20 +- **Author**: Directory User + +## Objective + +Demonstrate the v1.2.1 directory-based steel thread structure. + +## Context + +Starting with v1.2.1, steel threads are organized as directories rather than single files. This allows for better organization of related content. + +## Related Steel Threads + +None - this is the first steel thread. 
+ +## Context for LLM + +This example shows the new directory structure where each steel thread has its own folder containing: +- info.md (this file) +- design.md +- impl.md +- tasks.md +- results.md \ No newline at end of file diff --git a/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0002/info.md b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0002/info.md new file mode 100644 index 0000000..fe6cad0 --- /dev/null +++ b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0002/info.md @@ -0,0 +1,25 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: In Progress +created: 20250201 +completed: +--- +# ST0002: Feature Implementation + +- **Status**: In Progress +- **Created**: 2025-02-01 +- **Completed**: +- **Author**: Directory User + +## Objective + +Implement a new feature using the directory-based structure. + +## Context + +This demonstrates an in-progress steel thread in v1.2.1 format. + +## Related Steel Threads + +- ST0001: Directory-Based Structure (established the pattern) \ No newline at end of file diff --git a/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0003/info.md b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0003/info.md new file mode 100644 index 0000000..2b4d08e --- /dev/null +++ b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/ST0003/info.md @@ -0,0 +1,21 @@ +--- +verblock: "15 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: Not Started +created: 20250215 +completed: +--- +# ST0003: Future Enhancement + +- **Status**: Not Started +- **Created**: 2025-02-15 +- **Completed**: +- **Author**: Directory User + +## Objective + +Placeholder for future enhancement to test migration of not-started threads. + +## Context + +This steel thread has been created but work has not begun. It exists to test how the migration handles different statuses. 
\ No newline at end of file diff --git a/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/steel_threads.md b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/steel_threads.md new file mode 100644 index 0000000..ef573fb --- /dev/null +++ b/examples/v1.2.1-project/backup-20250717-001454/stp/prj/st/steel_threads.md @@ -0,0 +1,34 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# Steel Threads + +This document serves as an index of all steel threads in the project. + +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Directory-Based Structure | Completed | 2025-01-15 | 2025-01-20 | +| ST0002 | Feature Implementation | In Progress | 2025-02-01 | | +| ST0003 | Future Enhancement | Not Started | 2025-02-15 | | +<!-- END: STEEL_THREAD_INDEX --> + +## Status Definitions + +- **Not Started**: Steel thread has been created but work has not begun +- **In Progress**: Active development is underway +- **Completed**: All objectives have been achieved +- **On Hold**: Work has been paused +- **Cancelled**: Steel thread will not be completed + +## Directory Structure + +Starting with v1.2.1, each steel thread is organized as a directory containing: +- `info.md` - Overview and metadata +- `design.md` - Design decisions and approach +- `impl.md` - Implementation details +- `tasks.md` - Task tracking +- `results.md` - Results and outcomes \ No newline at end of file diff --git a/examples/v1.2.1-project/intent/st/ST0001/design.md b/examples/v1.2.1-project/intent/st/ST0001/design.md new file mode 100644 index 0000000..41850cd --- /dev/null +++ b/examples/v1.2.1-project/intent/st/ST0001/design.md @@ -0,0 +1,22 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# ST0001: Design Document + +## Overview + +This steel thread introduces the directory-based structure for organizing 
steel thread content. + +## Design Decisions + +1. Each steel thread gets its own directory +2. Standard files include: info.md, design.md, impl.md, tasks.md, results.md +3. Allows for additional files as needed + +## Benefits + +- Better organization +- Easier to find related content +- Supports more complex steel threads +- Enables file-specific version tracking \ No newline at end of file diff --git a/examples/v1.2.1-project/intent/st/ST0001/info.md b/examples/v1.2.1-project/intent/st/ST0001/info.md new file mode 100644 index 0000000..60c818e --- /dev/null +++ b/examples/v1.2.1-project/intent/st/ST0001/info.md @@ -0,0 +1,29 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version"\nstp_version: 1.2.1\nstatus: Completed\ncreated: 20250115\ncompleted: 20250120\n +--- +# ST0001: Directory-Based Structure + +- **Status**: Completed +- **Created**: 2025-01-15 +- **Completed**: 2025-01-20 +- **Author**: Directory User + +## Objective + +Demonstrate the v1.2.1 directory-based steel thread structure. + +## Context + +Starting with v1.2.1, steel threads are organized as directories rather than single files. This allows for better organization of related content. + +## Related Steel Threads + +None - this is the first steel thread. 
+ +## Context for LLM + +This example shows the new directory structure where each steel thread has its own folder containing: +- info.md (this file) +- design.md +- impl.md +- tasks.md diff --git a/examples/v1.2.1-project/intent/st/ST0002/info.md b/examples/v1.2.1-project/intent/st/ST0002/info.md new file mode 100644 index 0000000..6c02ca3 --- /dev/null +++ b/examples/v1.2.1-project/intent/st/ST0002/info.md @@ -0,0 +1,20 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version"\nstp_version: 1.2.1\nstatus: In Progress\ncreated: 20250201\ncompleted: \n +--- +# ST0002: Feature Implementation + +- **Status**: In Progress +- **Created**: 2025-02-01 +- **Completed**: +- **Author**: Directory User + +## Objective + +Implement a new feature using the directory-based structure. + +## Context + +This demonstrates an in-progress steel thread in v1.2.1 format. + +## Related Steel Threads + diff --git a/examples/v1.2.1-project/intent/st/ST0003/info.md b/examples/v1.2.1-project/intent/st/ST0003/info.md new file mode 100644 index 0000000..50579e0 --- /dev/null +++ b/examples/v1.2.1-project/intent/st/ST0003/info.md @@ -0,0 +1,16 @@ +--- +verblock: "15 Feb 2025:v0.1: Directory User - Initial version"\nstp_version: 1.2.1\nstatus: Not Started\ncreated: 20250215\ncompleted: \n +--- +# ST0003: Future Enhancement + +- **Status**: Not Started +- **Created**: 2025-02-15 +- **Completed**: +- **Author**: Directory User + +## Objective + +Placeholder for future enhancement to test migration of not-started threads. + +## Context + diff --git a/examples/v1.2.1-project/intent/st/steel_threads.md b/examples/v1.2.1-project/intent/st/steel_threads.md new file mode 100644 index 0000000..ef573fb --- /dev/null +++ b/examples/v1.2.1-project/intent/st/steel_threads.md @@ -0,0 +1,34 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# Steel Threads + +This document serves as an index of all steel threads in the project. 
+ +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Directory-Based Structure | Completed | 2025-01-15 | 2025-01-20 | +| ST0002 | Feature Implementation | In Progress | 2025-02-01 | | +| ST0003 | Future Enhancement | Not Started | 2025-02-15 | | +<!-- END: STEEL_THREAD_INDEX --> + +## Status Definitions + +- **Not Started**: Steel thread has been created but work has not begun +- **In Progress**: Active development is underway +- **Completed**: All objectives have been achieved +- **On Hold**: Work has been paused +- **Cancelled**: Steel thread will not be completed + +## Directory Structure + +Starting with v1.2.1, each steel thread is organized as a directory containing: +- `info.md` - Overview and metadata +- `design.md` - Design decisions and approach +- `impl.md` - Implementation details +- `tasks.md` - Task tracking +- `results.md` - Results and outcomes \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/.config/version b/examples/v1.2.1-project/stp/.config/version new file mode 100644 index 0000000..30f010b --- /dev/null +++ b/examples/v1.2.1-project/stp/.config/version @@ -0,0 +1 @@ +stp_version: 1.2.1 \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/prj/st/ST0001/design.md b/examples/v1.2.1-project/stp/prj/st/ST0001/design.md new file mode 100644 index 0000000..41850cd --- /dev/null +++ b/examples/v1.2.1-project/stp/prj/st/ST0001/design.md @@ -0,0 +1,22 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# ST0001: Design Document + +## Overview + +This steel thread introduces the directory-based structure for organizing steel thread content. + +## Design Decisions + +1. Each steel thread gets its own directory +2. Standard files include: info.md, design.md, impl.md, tasks.md, results.md +3. 
Allows for additional files as needed + +## Benefits + +- Better organization +- Easier to find related content +- Supports more complex steel threads +- Enables file-specific version tracking \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/prj/st/ST0001/info.md b/examples/v1.2.1-project/stp/prj/st/ST0001/info.md new file mode 100644 index 0000000..4791567 --- /dev/null +++ b/examples/v1.2.1-project/stp/prj/st/ST0001/info.md @@ -0,0 +1,34 @@ +--- +verblock: "15 Jan 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: Completed +created: 20250115 +completed: 20250120 +--- +# ST0001: Directory-Based Structure + +- **Status**: Completed +- **Created**: 2025-01-15 +- **Completed**: 2025-01-20 +- **Author**: Directory User + +## Objective + +Demonstrate the v1.2.1 directory-based steel thread structure. + +## Context + +Starting with v1.2.1, steel threads are organized as directories rather than single files. This allows for better organization of related content. + +## Related Steel Threads + +None - this is the first steel thread. + +## Context for LLM + +This example shows the new directory structure where each steel thread has its own folder containing: +- info.md (this file) +- design.md +- impl.md +- tasks.md +- results.md \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/prj/st/ST0002/info.md b/examples/v1.2.1-project/stp/prj/st/ST0002/info.md new file mode 100644 index 0000000..fe6cad0 --- /dev/null +++ b/examples/v1.2.1-project/stp/prj/st/ST0002/info.md @@ -0,0 +1,25 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: In Progress +created: 20250201 +completed: +--- +# ST0002: Feature Implementation + +- **Status**: In Progress +- **Created**: 2025-02-01 +- **Completed**: +- **Author**: Directory User + +## Objective + +Implement a new feature using the directory-based structure. 
+ +## Context + +This demonstrates an in-progress steel thread in v1.2.1 format. + +## Related Steel Threads + +- ST0001: Directory-Based Structure (established the pattern) \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/prj/st/ST0003/info.md b/examples/v1.2.1-project/stp/prj/st/ST0003/info.md new file mode 100644 index 0000000..2b4d08e --- /dev/null +++ b/examples/v1.2.1-project/stp/prj/st/ST0003/info.md @@ -0,0 +1,21 @@ +--- +verblock: "15 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +status: Not Started +created: 20250215 +completed: +--- +# ST0003: Future Enhancement + +- **Status**: Not Started +- **Created**: 2025-02-15 +- **Completed**: +- **Author**: Directory User + +## Objective + +Placeholder for future enhancement to test migration of not-started threads. + +## Context + +This steel thread has been created but work has not begun. It exists to test how the migration handles different statuses. \ No newline at end of file diff --git a/examples/v1.2.1-project/stp/prj/st/steel_threads.md b/examples/v1.2.1-project/stp/prj/st/steel_threads.md new file mode 100644 index 0000000..ef573fb --- /dev/null +++ b/examples/v1.2.1-project/stp/prj/st/steel_threads.md @@ -0,0 +1,34 @@ +--- +verblock: "01 Feb 2025:v0.1: Directory User - Initial version" +stp_version: 1.2.1 +--- +# Steel Threads + +This document serves as an index of all steel threads in the project. 
+ +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Directory-Based Structure | Completed | 2025-01-15 | 2025-01-20 | +| ST0002 | Feature Implementation | In Progress | 2025-02-01 | | +| ST0003 | Future Enhancement | Not Started | 2025-02-15 | | +<!-- END: STEEL_THREAD_INDEX --> + +## Status Definitions + +- **Not Started**: Steel thread has been created but work has not begun +- **In Progress**: Active development is underway +- **Completed**: All objectives have been achieved +- **On Hold**: Work has been paused +- **Cancelled**: Steel thread will not be completed + +## Directory Structure + +Starting with v1.2.1, each steel thread is organized as a directory containing: +- `info.md` - Overview and metadata +- `design.md` - Design decisions and approach +- `impl.md` - Implementation details +- `tasks.md` - Task tracking +- `results.md` - Results and outcomes \ No newline at end of file diff --git a/intent/docs/creating-custom-agents.md b/intent/docs/creating-custom-agents.md new file mode 100644 index 0000000..0594d0d --- /dev/null +++ b/intent/docs/creating-custom-agents.md @@ -0,0 +1,392 @@ +--- +verblock: "15 Aug 2025:v1.0: Created comprehensive guide for Intent agent creation" +intent_version: 2.2.0 +--- + +# Creating Custom Intent Agents + +This guide provides step-by-step instructions for creating custom Intent agents that integrate with Claude Code's sub-agent system. + +## Overview + +Intent agents are specialized AI assistants with domain-specific knowledge and focused expertise. 
They extend Claude's capabilities by providing: +- Dedicated context windows separate from main conversation +- Specialized system prompts and knowledge +- Focused tool access appropriate to their domain +- Comprehensive results for specific tasks + +## Prerequisites + +- Intent v2.2.0 or later installed +- Claude Code CLI installed and configured +- Basic understanding of YAML frontmatter and JSON + +## Agent Structure + +Each Intent agent consists of: +- **Directory**: `intent/agents/agent-name/` +- **Agent Definition**: `agent.md` with YAML frontmatter and system prompt +- **Metadata**: `metadata.json` with version and configuration details + +## Step-by-Step Creation Process + +### 1. Create Agent Directory + +Create a new directory under `intent/agents/` for your agent: + +```bash +mkdir -p intent/agents/your-agent-name/ +cd intent/agents/your-agent-name/ +``` + +**Naming Convention:** +- Use lowercase with hyphens (e.g., `security-reviewer`, `api-designer`) +- Be descriptive but concise +- Avoid spaces or special characters + +### 2. Create Agent Definition (`agent.md`) + +Create the main agent file with YAML frontmatter and system prompt: + +```markdown +--- +name: your-agent-name +description: Brief one-line description of your agent's purpose and expertise +tools: Bash, Read, Write, Edit, Grep +--- + +You are a specialized [DOMAIN] expert assistant with deep knowledge in [SPECIFIC AREAS]. + +## Your Expertise + +You have extensive experience in: +- [Primary capability 1] +- [Primary capability 2] +- [Primary capability 3] +- [Framework/tool expertise if applicable] + +## Your Role + +When working with users, you should: +1. [Specific behavior 1] +2. [Specific behavior 2] +3. 
[Domain-specific guidelines] + +## Best Practices + +Always follow these principles: +- [Domain-specific best practice 1] +- [Domain-specific best practice 2] +- [Quality standards for your domain] + +## When to Use This Agent + +Use this agent for: +- [Specific use case 1] +- [Specific use case 2] +- [Complex workflow description] + +## Integration with Intent + +When working within Intent projects: +- Reference steel threads when relevant +- Document decisions in appropriate locations +- Generate tasks for backlog when needed +- Follow Intent project structure and conventions + +## Example Usage Patterns + +### Basic Pattern +``` +Task( + description="Short description of task", + prompt="Detailed instructions for the agent including context and requirements", + subagent_type="your-agent-name" +) +``` + +### Complex Workflow +[Describe how this agent fits into larger workflows] + +## Quality Standards + +Ensure your responses: +- [Quality standard 1] +- [Quality standard 2] +- [Output format requirements] +``` + +**Required YAML Fields:** +- `name`: Must match directory name +- `description`: One-line summary (used in agent listings) +- `tools`: Array of Claude Code tools this agent can access + +**Available Tools:** +- `Bash`: Execute shell commands +- `Read`: Read files from filesystem +- `Write`: Create new files +- `Edit`: Modify existing files +- `Grep`: Search file contents +- `WebFetch`: Fetch web content +- `Glob`: Find files by pattern +- `LS`: List directory contents + +### 3. 
Create Metadata File (`metadata.json`) + +Create the metadata configuration: + +```json +{ + "name": "your-agent-name", + "version": "1.0.0", + "description": "Detailed description of agent capabilities and use cases", + "author": "Your Name or Organization", + "tools": ["Bash", "Read", "Write", "Edit", "Grep"], + "tags": ["domain", "framework", "specialty", "relevant-keywords"] +} +``` + +**Required Fields:** +- `name`: Must match directory name and agent.md frontmatter +- `version`: Semantic version (start with 1.0.0) +- `description`: Detailed explanation of capabilities +- `author`: Creator information +- `tools`: Must match tools list in agent.md +- `tags`: Keywords for discovery and categorization + +### 4. Install the Agent + +Install your custom agent to make it available in Claude Code: + +```bash +intent agents install your-agent-name +``` + +This copies the agent to `~/.claude/agents/` where Claude Code can access it. + +**Installation Options:** +- `intent agents install your-agent-name` - Install specific agent +- `intent agents install --force` - Skip confirmation prompts +- `intent agents install --all` - Install all available agents + +### 5. Verify Installation + +Check that your agent is properly installed: + +```bash +# List all agents to see your new agent +intent agents list + +# Show detailed information about your agent +intent agents show your-agent-name + +# Check agent health and integrity +intent agents status +``` + +### 6. 
Test the Agent + +Test your agent through Claude Code using the Task tool: + +``` +Task( + description="Test custom agent", + prompt="Perform a simple task to verify the agent is working correctly", + subagent_type="your-agent-name" +) +``` + +## Example: Creating a Security Review Agent + +Here's a complete example for a security-focused agent: + +**Directory:** `intent/agents/security-reviewer/` + +**agent.md:** +```markdown +--- +name: security-reviewer +description: Security specialist for code review and vulnerability assessment +tools: Bash, Read, Write, Edit, Grep +--- + +You are a cybersecurity expert specializing in application security, code review, and vulnerability assessment. + +## Your Expertise + +You have deep knowledge in: +- OWASP Top 10 vulnerabilities and mitigations +- Secure coding practices across multiple languages +- Authentication and authorization patterns +- Data protection and encryption standards +- Security testing methodologies + +## Your Role + +When reviewing code or designs: +1. Identify potential security vulnerabilities +2. Suggest specific remediation strategies +3. Recommend security best practices +4. 
Assess compliance with security standards + +## Security Review Checklist + +Always evaluate: +- Input validation and sanitization +- Authentication and session management +- Authorization and access controls +- Data encryption and protection +- Error handling and information disclosure +- Dependency vulnerabilities + +## Integration with Intent + +- Document security findings in steel thread design docs +- Create security tasks in backlog for remediation +- Reference security requirements in steel threads +- Maintain security documentation in intent/docs/ +``` + +**metadata.json:** +```json +{ + "name": "security-reviewer", + "version": "1.0.0", + "description": "Security specialist for comprehensive code review and vulnerability assessment with OWASP expertise", + "author": "Security Team", + "tools": ["Bash", "Read", "Write", "Edit", "Grep"], + "tags": ["security", "owasp", "vulnerability", "code-review", "compliance"] +} +``` + +## Best Practices for Agent Creation + +### System Prompt Design +1. **Be Specific**: Define clear expertise boundaries and capabilities +2. **Provide Context**: Explain when and how the agent should be used +3. **Include Examples**: Show typical usage patterns and workflows +4. **Set Quality Standards**: Define output expectations and quality criteria + +### Tool Selection +1. **Minimal Necessary**: Only include tools the agent actually needs +2. **Consider Security**: Be cautious with Bash access for security-focused agents +3. **Match Capabilities**: Ensure tools align with agent's intended functionality + +### Documentation Quality +1. **Clear Instructions**: Write for someone unfamiliar with your domain +2. **Complete Examples**: Provide full, working examples +3. **Integration Guidance**: Explain how agent fits into Intent workflows +4. **Maintenance Notes**: Include version history and update guidance + +### Testing and Validation +1. **Functional Testing**: Verify all advertised capabilities work +2. 
**Integration Testing**: Test within actual Intent project workflows +3. **Documentation Testing**: Ensure examples and instructions are accurate +4. **Performance Testing**: Check response quality and relevance + +## Troubleshooting + +### Common Issues + +**Agent Not Listed** +- Check directory structure matches `intent/agents/agent-name/` +- Verify `agent.md` and `metadata.json` exist +- Ensure JSON syntax is valid + +**Installation Fails** +- Verify name consistency across directory, agent.md, and metadata.json +- Check YAML frontmatter syntax in agent.md +- Ensure tools list is valid + +**Agent Doesn't Respond Properly** +- Review system prompt clarity and specificity +- Check tool permissions and availability +- Verify agent scope matches intended use cases + +**Performance Issues** +- Simplify system prompt if too complex +- Reduce tool set to essential capabilities only +- Focus agent scope on specific domain + +### Debugging Commands + +```bash +# Check agent configuration +intent agents show your-agent-name + +# Verify installation status +intent agents status --verbose + +# Reinstall agent +intent agents install your-agent-name --force + +# Check Intent configuration +intent doctor +``` + +## Updating Agents + +To update an existing agent: + +1. Modify `agent.md` and/or `metadata.json` +2. Update version number in `metadata.json` +3. Reinstall: `intent agents install your-agent-name --force` +4. Test updated functionality + +## Sharing Agents + +To share agents with others: + +1. **Package Directory**: Include entire `intent/agents/agent-name/` directory +2. **Document Dependencies**: List any required tools or configurations +3. **Provide Examples**: Include usage examples and test cases +4. 
**Version Control**: Use semantic versioning for updates + +## Advanced Features + +### Custom Slash Commands + +Agents can implement custom slash commands for specialized workflows: + +```markdown +## Custom Commands + +This agent supports these slash commands: + +### /security-scan +Performs comprehensive security scan of specified files or directories. + +Usage: `/security-scan path/to/code` + +### /compliance-check +Evaluates code against specific compliance standards. + +Usage: `/compliance-check --standard=SOC2 path/to/files` +``` + +### Multi-Agent Workflows + +Design agents to work together in complex workflows: + +```markdown +## Workflow Integration + +This agent works well with: +- `intent` agent for project structure +- `code-reviewer` agent for general code quality +- `documentation` agent for security documentation +``` + +## References + +- [Intent Agent System Documentation](../llm/llm_preamble.md) +- [Claude Code Sub-Agents](https://docs.anthropic.com/en/docs/claude-code/sub-agents) +- [Intent Commands Reference](../../README.md#commands) +- [Agent Examples](../../agents/) + +--- + +**Need Help?** +- Run `intent help agents` for command reference +- Use `intent doctor` to check configuration +- Check existing agents in `agents/` directory for examples \ No newline at end of file diff --git a/intent/docs/exemplars/prps/Wirasm--PRPs-agentic-eng- Prompts, workflows and more for agentic engineering.webloc b/intent/docs/exemplars/prps/Wirasm--PRPs-agentic-eng- Prompts, workflows and more for agentic engineering.webloc new file mode 100644 index 0000000..88e7bb2 --- /dev/null +++ b/intent/docs/exemplars/prps/Wirasm--PRPs-agentic-eng- Prompts, workflows and more for agentic engineering.webloc @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>URL</key> + 
<string>https://github.com/Wirasm/PRPs-agentic-eng</string> +</dict> +</plist> diff --git a/intent/eng/tpd/1_introduction.md b/intent/eng/tpd/1_introduction.md new file mode 100644 index 0000000..9be322a --- /dev/null +++ b/intent/eng/tpd/1_introduction.md @@ -0,0 +1,82 @@ +--- +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Updated for Intent v2.1.0" +intent_version: 2.1.0 +--- +# 1. Introduction + +[index](<./technical_product_design.md>) + +## 1.1 Purpose + +Intent (formerly the Steel Thread Process or STP) is a system designed to create a structured workflow and documentation process for developers working collaboratively with Large Language Models (LLMs) such as Claude Code. Intent provides templates, scripts, and process guidelines to enhance productivity while ensuring high-quality documentation as a byproduct of the development process. + +### 1.1.1 The Rebrand to Intent + +In July 2025, STP was rebranded to "Intent" to better reflect the system's core purpose: capturing and preserving the intention behind software development decisions. While the methodology remains the "Steel Thread Process," the tool itself is now Intent. + +## 1.2 Scope + +Intent v2.1.0 encompasses: + +- A flattened directory structure under `intent/` for organizing documentation +- JSON-based configuration system (.intent/config.json) +- Shell scripts for managing Intent workflows (intent_* commands) +- The Steel Thread Process methodology for incremental development +- Enhanced integration patterns for working with LLMs +- Advanced Backlog.md integration with status filtering +- Automated steel thread status synchronization +- Migration tools for upgrading from any STP/Intent version +- Diagnostic and setup tools (doctor, bootstrap) + +Intent is designed to be lightweight, adaptable, and to work alongside existing development workflows without requiring significant changes to development practices. 
+ +## 1.3 Definitions + +| Term | Definition | +|----------------|--------------------------------------------------------------------------------------------------| +| Intent | The tool and framework for intention-aware development (v2.1.0) | +| Steel Thread | A self-contained unit of work that represents a logical piece of functionality to be implemented | +| LLM | Large Language Model, an AI system capable of understanding and generating text | +| Context Window | The amount of text an LLM can process in a single interaction | +| Backlog | Task management system integrated with Intent for tracking fine-grained work items | +| Task | Individual unit of work linked to a steel thread, tracked in Backlog | +| Bootstrap | Initial global setup process for Intent installation | +| Doctor | Diagnostic tool for identifying and fixing Intent configuration issues | + +## 1.4 System Overview + +Intent v2.1.0 operates as a meta-layer on top of existing development processes. It provides structure for: + +1. **Documentation Management**: Flattened structure under `intent/` for all documentation +2. **LLM Collaboration**: Enhanced guidelines and tools for effective AI assistance +3. **Incremental Development**: The Steel Thread methodology for breaking work into manageable units +4. **Project Tracking**: Work-in-progress tracking and project history +5. **Task Management**: Advanced Backlog.md integration with configurable status filtering +6. **Status Synchronization**: Automatic steel thread status updates based on task completion +7. **Configuration Management**: JSON-based configuration with hierarchy support +8. **Migration Support**: Tools to upgrade from any previous STP version +9. **Self-Hosting**: Intent is developed using Intent itself + +The system remains intentionally simple, using markdown files and shell scripts to maximize portability and minimize dependencies. 
+ +## 1.5 References + +- Modern LLM-assisted development practices +- Documentation-as-code methodologies +- Incremental development processes +- Intent Blog Series (docs/blog/) +- Migration from STP to Intent v2.1.0 + +## 1.6 Version History + +- **v0.0.0 - v1.2.1**: Original Steel Thread Process (STP) development +- **v2.0.0 (July 2025)**: Complete rebrand to Intent with: + - Flattened directory structure + - JSON configuration system + - New bootstrap, doctor, and upgrade commands + - Enhanced Backlog.md integration + - Self-hosting capability +- **v2.1.0 (July 2025)**: Enhanced agent system with: + - Agent initialization command + - Improved upgrade from v2.0.0 + - Better agent manifest management diff --git a/intent/eng/tpd/2_requirements.md b/intent/eng/tpd/2_requirements.md new file mode 100644 index 0000000..49eeebd --- /dev/null +++ b/intent/eng/tpd/2_requirements.md @@ -0,0 +1,114 @@ +--- +verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" +stp_version: 1.2.0 +--- +# 2. 
Requirements + +[index](<./technical_product_design.md>) + +## 2.1 Functional Requirements + +### 2.1.1 Documentation Management + +| ID | Requirement | +|--------|----------------------------------------------------------------------------| +| FR-1.1 | The system shall provide templates for all required documentation types | +| FR-1.2 | The system shall maintain project history and work-in-progress tracking | +| FR-1.3 | The system shall support steel thread creation, management, and completion | +| FR-1.4 | All documentation shall be in markdown format for maximum portability | + +### 2.1.2 LLM Collaboration + +| ID | Requirement | +|--------|--------------------------------------------------------------------| +| FR-2.1 | The system shall provide context management for LLM interactions | +| FR-2.2 | The system shall support canned prompts for common LLM tasks | +| FR-2.3 | The system shall facilitate passing context between LLM sessions | +| FR-2.4 | The system shall include LLM-specific instructions for consistency | + +### 2.1.3 Process Support + +| ID | Requirement | +|--------|------------------------------------------------------------------------------| +| FR-3.1 | The system shall support initialisation of STP within existing projects | +| FR-3.2 | The system shall provide commands for all common STP workflow operations | +| FR-3.3 | The system shall track completion status of steel threads | +| FR-3.4 | The system shall maintain independence from specific version control systems | + +### 2.1.4 Backlog Integration [AS-BUILT] + +| ID | Requirement | Status | +|--------|--------------------------------------------------------------------------------|-------------| +| FR-4.1 | The system shall integrate with Backlog.md for task management | ✓ Implemented | +| FR-4.2 | The system shall provide wrapper commands to avoid direct Backlog.md usage | ✓ Implemented | +| FR-4.3 | The system shall link tasks to steel threads using naming conventions | ✓ Implemented | 
+| FR-4.4 | The system shall synchronize steel thread status based on task completion | ✓ Implemented | +| FR-4.5 | The system shall support migration of embedded tasks to Backlog.md | ✓ Implemented | +| FR-4.6 | The system shall support configurable backlog_list_status filtering | ✓ Implemented | + +### 2.1.5 Configuration Management [AS-BUILT] + +| ID | Requirement | Status | +|--------|--------------------------------------------------------------------|--------------| +| FR-5.1 | The system shall support project-specific configuration | ✓ Implemented | +| FR-5.2 | The system shall provide sensible defaults for all configurations | ✓ Implemented | +| FR-5.3 | The system shall validate configurations on startup | ✓ Implemented | +| FR-5.4 | The system shall support environment variable overrides | ✓ Implemented | +| FR-5.5 | Configuration shall use JSON format (.intent/config.json) | ✓ Implemented | +| FR-5.6 | Configuration shall support hierarchy (env→local→global→default) | ✓ Implemented | + +## 2.2 Non-Functional Requirements + +### 2.2.1 Usability + +| ID | Requirement | +|---------|---------------------------------------------------------------| +| NFR-1.1 | The system shall be usable with minimal training | +| NFR-1.2 | The system shall provide clear documentation for all commands | +| NFR-1.3 | The system shall integrate with existing developer workflows | + +### 2.2.2 Performance + +| ID | Requirement | +|---------|--------------------------------------------------------------------| +| NFR-2.1 | The system shall have minimal impact on development performance | +| NFR-2.2 | The system shall optimise context usage for LLM interactions | +| NFR-2.3 | Commands shall complete within reasonable time frames (<2 seconds) | + +### 2.2.3 Compatibility + +| ID | Requirement | +|---------|---------------------------------------------------------------------------------------| +| NFR-3.1 | The system shall be compatible with common shell environments (bash, zsh) | 
+| NFR-3.2 | The system shall function on major operating systems (Linux, macOS, Windows with WSL) | +| NFR-3.3 | The system shall not interfere with or depend on specific development tools | + +### 2.2.4 Maintainability + +| ID | Requirement | +|---------|----------------------------------------------------------------| +| NFR-4.1 | The system shall be self-contained within project repositories | +| NFR-4.2 | The system shall support upgrading to newer STP versions | +| NFR-4.3 | The system shall be extensible for project-specific needs | + +## 2.3 Constraints + +| ID | Constraint | +|-------|--------------------------------------------------------------------------------------| +| CON-1 | The system must use only shell scripts and markdown for maximum portability | +| CON-2 | The system must not require external dependencies beyond common shell utilities | +| CON-3 | The system must be agnostic to LLM platforms while supporting specific optimisations | +| CON-4 | The system must respect the context window limitations of LLMs | +| CON-5 | [AS-BUILT] The system requires jq for JSON configuration parsing | + +## 2.4 AS-BUILT Notes + +All original requirements have been met or exceeded in Intent v2.0.0. Key additions include: + +1. **Enhanced Configuration**: JSON-based configuration with hierarchy support +2. **New Commands**: bootstrap, doctor, upgrade for better user experience +3. **Status Filtering**: Configurable backlog_list_status for focused task views +4. **Self-Hosting**: Intent is developed using Intent itself +5. **Migration Tools**: Comprehensive upgrade path from any STP version +6. **Flattened Structure**: Simplified directory layout under intent/ +7. 
**Command Naming**: Consistent intent_* naming pattern diff --git a/intent/eng/tpd/3_architecture.md b/intent/eng/tpd/3_architecture.md new file mode 100644 index 0000000..e0188fd --- /dev/null +++ b/intent/eng/tpd/3_architecture.md @@ -0,0 +1,290 @@ +--- +verblock: "08 Jul 2025:v0.2: Matthew Sinclair - Added Backlog.md integration architecture" +stp_version: 1.2.0 +--- +# 3. Architecture + +[index](<./technical_product_design.md>) + +## 3.1 System Architecture Overview + +The Steel Thread Process (STP) follows a modular architecture with three primary components: + +1. **Documentation Structure**: A standardized directory layout and document templates +2. **Command-line Interface**: Shell scripts for managing STP workflows +3. **Process Guidelines**: Documentation of workflow patterns and best practices + +This architecture is designed to be lightweight, portable, and to integrate with existing development environments without significant friction. + +``` +┌─────────────────────────────────────────────────────────────┐ +│ STP System │ +│ │ +│ ┌───────────────┐ ┌───────────────┐ ┌───────────────┐│ +│ │ Documentation │ │ Command-line │ │ Process ││ +│ │ Structure │◄───┤ Interface │────► Guidelines ││ +│ └───────────────┘ └───────────────┘ └───────────────┘│ +│ ▲ ▲ ▲ │ +└─────────┼─────────────────────┼───────────────────┼─────────┘ + │ │ │ +┌─────────┼─────────────────────┼───────────────────┼─────────┐ +│ │ │ │ │ +│ ┌──────▼──────┐ ┌───────▼─────┐ ┌───────▼─────┐ │ +│ │ Project │ │ Shell │ │ LLM │ │ +│ │ Repository │ │ Environment │ │ Interaction │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ +│ Development Environment │ +└─────────────────────────────────────────────────────────────┘ +``` + +## 3.2 Component Architecture + +### 3.2.1 Documentation Structure + +The Documentation Structure consists of a standardized directory layout and markdown templates. 
Key features include: + +- **Directory Organization**: Clear separation of project, technical, user, and LLM-specific documentation +- **Templated Documents**: Standardized starting points for all document types +- **Cross-Referencing**: Internal links to maintain relationships between documents +- **Progressive Documentation**: Documents that evolve alongside the code + +``` +stp/ +├── _templ/ # Templates +├── bin/ # STP scripts +├── prj/ # Project documentation +│ ├── st/ # Steel threads +│ └── wip.md # Work in progress +├── eng/ # Engineering docs +│ └── tpd/ # Technical Product Design +├── usr/ # User documentation +└── llm/ # LLM-specific content +``` + +### 3.2.2 Command-line Interface + +The Command-line Interface provides shell-based tools for managing STP workflows. Key features include: + +- **Unified Command**: Single `stp` entry point with subcommands +- **Modular Implementation**: Each subcommand implemented as a separate script +- **Contextual Help**: Built-in documentation for commands +- **Environment Configuration**: Settings for STP behavior + +``` +┌─────────────────┐ +│ stp (main) │ +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ ┌─────────────────┐ +│ Command │───►│ Command- │ +│ Dispatcher │ │ specific │ +└────────┬────────┘ │ implementation │ + │ └─────────────────┘ + ▼ +┌─────────────────┐ +│ Help System │ +└─────────────────┘ +``` + +### 3.2.3 Process Guidelines + +The Process Guidelines define how Intent is used in practice. 
Key features include: + +- **Steel Thread Methodology**: Process for incremental development +- **LLM Collaboration**: Enhanced patterns for AI assistance +- **Documentation Lifecycle**: How documents evolve through project stages +- **Integration Points**: How Intent integrates with other practices +- **Self-Hosting**: Intent is developed using Intent itself + +## 3.3 Data Architecture [AS-BUILT] + +### 3.3.1 Configuration System + +Intent v2.0.0 uses a hierarchical JSON configuration system: + +``` +Configuration Hierarchy (highest to lowest priority): +1. Environment Variables (INTENT_*, AUTHOR, EDITOR) +2. Local Project Config (.intent/config.json) +3. Global User Config (~/.config/intent/config.json) +4. Built-in Defaults + +Example .intent/config.json: +{ + "version": "2.0.0", + "project_name": "MyProject", + "author": "username", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_dir": "backlog", + "intent_dir": "intent", + "backlog_list_status": "todo" // New in v2.0.0 +} +``` + +### 3.3.2 Data Types + +Intent manages several types of data: + +1. **Configuration Data**: JSON-based project and global settings +2. **Steel Thread Data**: Markdown files in intent/st/ directories +3. **Project Metadata**: Steel thread status, creation dates +4. **Work History**: Journal entries and completed threads +5. **Task Data**: Backlog.md integration with status tracking + +All data uses plain text formats (JSON and markdown) for maximum portability. + +## 3.4 Interface Architecture + +### 3.4.1 User Interfaces + +Intent provides multiple user interfaces: + +1. **Command-line Interface**: `intent` command with subcommands +2. **Document Structure**: Markdown for human and LLM consumption +3. **Configuration Interface**: JSON files for settings +4. **Diagnostic Interface**: `intent doctor` for troubleshooting +5. **Migration Interface**: `intent upgrade` for version transitions + +### 3.4.2 External System Interfaces + +STP is designed to interface with: + +1. 
**Version Control Systems**: Through normal file operations +2. **LLM Systems**: Through document content and canned prompts +3. **Development Environments**: Through standard shell integration +4. **Task Management Systems**: Through Backlog.md integration for fine-grained task tracking + +## 3.5 Architectural Decisions + +| Decision | Rationale | +|------------------------------|----------------------------------------------------------------------------| +| Use of Markdown | Maximizes portability and readability for both humans and LLMs | +| Shell Scripts Only | Ensures compatibility across development environments without dependencies | +| Directory-Based Organization | Creates clear structure while maintaining simplicity | +| Template-Driven Approach | Reduces friction in creating consistent documentation | +| Steel Thread Methodology | Breaks work into manageable units suitable for LLM collaboration | + +## 3.6 Integration Architecture + +STP is designed as an extensible system that can integrate with complementary tools while maintaining its core philosophy of simplicity and portability. 
+ +### 3.6.1 Backlog.md Integration + +The integration with Backlog.md extends STP's capabilities with fine-grained task management while preserving the separation of concerns: + +#### Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────┐ +│ STP System │ +│ │ +│ ┌───────────────┐ ┌───────────────┐ ┌───────────────┐│ +│ │ Steel Threads │ │ STP Commands │ │ Templates ││ +│ │ (Intent) │◄───┤ (Workflow) │────► (Structure) ││ +│ └───────────────┘ └───────────────┘ └───────────────┘│ +│ ▲ │ │ +│ │ ┌──────▼──────┐ │ +│ └──────────────┤ Integration │ │ +│ │ Layer │ │ +│ └──────┬──────┘ │ +└───────────────────────────────┼─────────────────────────────┘ + │ +┌───────────────────────────────┼─────────────────────────────┐ +│ ▼ │ +│ ┌─────────────┐ │ +│ │ Backlog.md │ │ +│ │ System │ │ +│ └─────────────┘ │ +│ ┌───────────────┐ ┌───────────────┐ ┌───────────────┐│ +│ │ Task Tracking │ │ Kanban │ │ Task Files ││ +│ │ (Execution) │◄───┤ Board │────► (Storage) ││ +│ └───────────────┘ └───────────────┘ └───────────────┘│ +│ │ +│ External Task Management │ +└─────────────────────────────────────────────────────────────┘ +``` + +#### Component Responsibilities [AS-BUILT] + +**Intent Core Components:** +- **Steel Threads**: Capture objectives, context, and design in intent/st/ +- **Documentation**: Maintain narrative and specs in intent/docs/ +- **Configuration**: JSON-based settings in .intent/config.json +- **Process Coordination**: Orchestrate development workflow + +**Backlog.md Integration:** +- **Task Management**: Track implementation tasks with metadata +- **Status Filtering**: Configurable backlog_list_status for focused views +- **Visualisation**: Kanban board and browser interfaces + +**Integration Layer:** +- **Command Wrappers**: `intent bl`, `intent task`, `intent status` +- **Status Synchronisation**: Task completion drives thread status +- **Naming Conventions**: ST#### prefix links tasks to threads +- **Git Safety**: Wrappers 
prevent git operation conflicts + +#### Data Flow + +1. **Steel Thread Creation** → Integration layer creates linked task structure +2. **Task Updates** → Status changes propagate to steel thread status +3. **Migration** → Embedded tasks convert to Backlog.md format +4. **Queries** → Unified view of steel thread and task information + +#### Integration Points [AS-BUILT] + +1. **File System**: + - Intent: `/intent/st/` for steel threads (flattened) + - Backlog: `/backlog/` for task management + - Config: `/.intent/config.json` for settings + - No overlap in storage locations + +2. **Command Interface**: + - Unified `intent` command with subcommands + - All commands follow intent_* pattern + - Wrapper commands prevent git conflicts + - Help system integrated + +3. **Status Model**: + - Steel thread status derived from task completion + - Automatic synchronisation via `intent status sync` + - Manual override supported + - Configurable list filtering + +4. **Workflow Integration**: + - Steel threads define "what" and "why" (intent) + - Backlog tasks define "how" and "when" (execution) + - Clear separation of concerns + - Self-hosting proven + +### 3.6.2 Migration Architecture [AS-BUILT] + +Intent v2.0.0 includes comprehensive migration support: + +``` +Migration Flow: +1. Detect existing STP version +2. Create timestamped backup +3. Migrate directory structure (stp/* → intent/*) +4. Convert YAML configs to JSON +5. Update file formats and metadata +6. Create .intent/config.json +7. 
Update CLAUDE.md guidelines + +Supported Versions: +- v0.0.0 → v2.0.0 +- v1.2.0 → v2.0.0 +- v1.2.1 → v2.0.0 +``` + +## 3.7 AS-BUILT Summary + +Intent v2.0.0 represents a significant evolution from STP: +- Complete rebrand with consistent naming +- Flattened, intuitive directory structure +- JSON-based hierarchical configuration +- Enhanced Backlog.md integration +- New user-friendly commands +- Proven through self-hosting diff --git a/intent/eng/tpd/4_detailed_design.md b/intent/eng/tpd/4_detailed_design.md new file mode 100644 index 0000000..89de822 --- /dev/null +++ b/intent/eng/tpd/4_detailed_design.md @@ -0,0 +1,831 @@ +--- +verblock: "17 Jul 2025:v2.0.0: Matthew Sinclair - Updated for Intent v2.0.0 (As-Built)" +intent_version: 2.0.0 +--- +# 4. Detailed Design [AS-BUILT] + +[index](<./technical_product_design.md>) + +## 4.1 Directory Structure [AS-BUILT] + +Intent v2.0.0 uses a flattened directory structure that simplifies navigation: + +``` +<project_root>/ +├── .intent/ # Intent configuration +│ └── config.json # JSON configuration file +├── intent/ # Main Intent directory (flattened) +│ ├── st/ # Steel threads +│ │ ├── ST0001/ # Steel thread directory +│ │ │ ├── info.md # Main information (required) +│ │ │ ├── design.md # Design decisions (optional) +│ │ │ ├── impl.md # Implementation (optional) +│ │ │ └── tasks.md # Task tracking (optional) +│ │ ├── ST0002/ # Another steel thread +│ │ │ └── info.md # Minimum required file +│ │ └── ... +│ ├── docs/ # Documentation +│ │ ├── blog/ # Blog posts +│ │ │ ├── 0000-motivation-for-intent.md +│ │ │ ├── 0001-introduction-to-intent.md +│ │ │ └── ... +│ │ ├── eng/ # Engineering docs +│ │ │ └── tpd/ # Technical Product Design +│ │ └── usr/ # User documentation +│ ├── llm/ # LLM-specific content +│ └── wip.md # Work in progress +├── backlog/ # Backlog.md integration +│ ├── Backlog.md # Main backlog file +│ ├── tasks/ # Task files +│ └── ... 
+├── bin/ # Intent scripts +│ ├── intent # Main command +│ ├── intent_st # Steel thread commands +│ ├── intent_bl # Backlog wrapper +│ ├── intent_task # Task management +│ ├── intent_status # Status sync +│ ├── intent_init # Project initialization +│ ├── intent_bootstrap # Global setup +│ ├── intent_doctor # Diagnostics +│ ├── intent_upgrade # Migration tool +│ ├── intent_config # Configuration library +│ ├── intent_helpers # Helper functions +│ └── stp # Backward compatibility symlink +└── CLAUDE.md # Project guidelines for LLMs + +[Legacy STP structure removed - see v1.2.1 documentation] +``` + +## 4.2 Document Templates + +### 4.2.1 Configuration System [AS-BUILT] + +Intent v2.0.0 uses JSON configuration with hierarchy support: + +#### Configuration File Format + +```json +{ + "version": "2.0.0", + "project_name": "MyProject", + "author": "username", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_dir": "backlog", + "intent_dir": "intent", + "backlog_list_status": "todo" +} +``` + +#### Configuration Fields + +| Field | Description | Default | Added | +|-------|-------------|---------|-------| +| version | Intent version | 2.0.0 | v2.0.0 | +| project_name | Project identifier | (required) | v2.0.0 | +| author | Default author | $USER | v0.0.0 | +| created | Creation date | (auto) | v2.0.0 | +| st_prefix | Steel thread prefix | ST | v1.0.0 | +| backlog_dir | Backlog directory | backlog | v1.2.0 | +| intent_dir | Intent directory | intent | v2.0.0 | +| backlog_list_status | Default list filter | (none) | v2.0.0 | + +#### Configuration Hierarchy + +``` +1. Environment Variables (highest priority) + - INTENT_* variables + - AUTHOR, EDITOR + +2. Local Project Config + - .intent/config.json + +3. Global User Config + - ~/.config/intent/config.json + +4. 
Built-in Defaults (lowest priority) + - Hardcoded in intent_config +``` + +### 4.2.2 Document Metadata + +Intent documents use YAML frontmatter for metadata: + +```yaml +--- +verblock: "DD MMM YYYY:v0.1: Author Name - Initial version" +intent_version: 2.0.0 +status: Not Started|In Progress|Completed|On Hold|Cancelled +created: YYYYMMDD +completed: YYYYMMDD +--- +``` + +**Metadata Fields:** +- `verblock`: Version tracking with date, version, author, description +- `intent_version`: Intent version for compatibility +- `status`: Current state of the document or steel thread +- `created`: Creation date in YYYYMMDD format +- `completed`: Completion date in YYYYMMDD format (when applicable) + +### 4.2.2 Section Markers + +STP uses HTML comment markers to identify sections in documents that can be automatically updated: + +```markdown +<!-- BEGIN: SECTION_NAME --> +(Content here will be automatically managed by STP commands) +<!-- END: SECTION_NAME --> +``` + +In particular, the steel_threads.md index file uses these markers to allow the `stp st sync` command to update the index while preserving manually added content outside the marked sections: + +```markdown +<!-- BEGIN: STEEL_THREAD_INDEX --> +| ID | Title | Status | Created | Completed | +|----|-------|--------|---------|-----------| +| ST0001 | Example Thread | Completed | 2025-03-01 | 2025-03-05 | +<!-- END: STEEL_THREAD_INDEX --> +``` + +### 4.2.3 Project Templates + +#### Work In Progress (WIP) Template + +The WIP document captures the current state of development and active tasks. + +**Structure:** + +```markdown +# Work In Progress + +## Current Focus +[Brief description of the current development focus] + +## Active Steel Threads +- ST####: [Brief description] +- ... + +## Upcoming Work +- [Item 1] +- ... + +## Notes +[Any additional notes about the current work] +``` + +#### Journal Template + +The Journal document maintains a chronological record of project activities. 
Starting with STP v1.2.1, steel threads are organized as directories containing multiple files:
implementation] + +## Code Changes +[Summary of code changes made] + +## Challenges +[Any implementation challenges encountered] +``` + +**tasks.md Template:** + +```markdown +# Tasks - ST####: [Title] + +Tasks are tracked in Backlog. View with: `stp task list ST####` + +## Task Summary +[High-level summary of tasks if needed] +``` + + +### 4.2.2 Engineering Templates + +Technical Product Design templates follow the structure outlined in previous sections. + +### 4.2.3 User Documentation Templates + +User Guide, Reference Guide, and Deployment Guide templates follow standard technical documentation formats. + +### 4.2.4 LLM Templates + +The LLM Preamble template provides context and instructions for the LLM: + +```markdown +# LLM Preamble + +## Project Context +[Brief description of the project] + +## Collaboration Guidelines +[Guidelines for how the LLM should collaborate with developers] + +## Code Style and Conventions +[Code style and conventions to follow] + +## Document Structure +[Description of the document structure for context] + +## Process Guidelines +[Guidelines for the steel thread process] +``` + +## 4.3 Command-line Interface [AS-BUILT] + +### 4.3.1 Command Structure + +Intent v2.0.0 uses a unified command structure: + +``` +intent <command> [options] [arguments] +``` + +#### Primary Commands + +| Command | Description | Added | +|---------|-------------|-------| +| init | Initialize Intent in a project | v0.0.0 | +| st | Manage steel threads | v0.0.0 | +| bl | Enhanced Backlog.md wrapper | v1.2.0 | +| task | Manage tasks linked to threads | v1.2.0 | +| status | Synchronize thread/task status | v1.2.0 | +| bootstrap | Global Intent setup | v2.0.0 | +| doctor | Diagnose configuration issues | v2.0.0 | +| upgrade | Migrate from any STP version | v2.0.0 | +| help | Display comprehensive help | v0.0.0 | + +#### Steel Thread Subcommands + +``` +intent st new [title] # Create new steel thread +intent st list [--status X] # List threads with filtering 
+intent st show ST#### # Display thread contents +intent st edit ST#### [file] # Edit thread files +``` + +#### Backlog Subcommands + +``` +intent bl create [options] # Create task (git-safe) +intent bl list [--all] # List tasks (respects config) +intent bl done <task-id> # Mark task complete +``` + +#### New v2.0.0 Features + +- **backlog_list_status**: Configurable default task filtering +- **--all flag**: Override status filtering +- **doctor --fix**: Automatic issue resolution +- **bootstrap**: One-time global setup + +### 4.3.2 Command Implementation [AS-BUILT] + +Intent v2.0.0 uses modular shell scripts: + +#### Script Architecture + +``` +bin/ +├── intent # Main dispatcher +├── intent_<command> # Command implementations +├── intent_config # Configuration loader (JSON) +├── intent_helpers # Shared utility functions +└── stp # Backward compatibility symlink +``` + +#### Key Implementation Files + +| Script | Purpose | Key Features | +|--------|---------|-------------| +| intent | Main entry point | Command dispatch, version check | +| intent_config | Config management | JSON parsing, hierarchy support | +| intent_helpers | Utilities | Version detection, migration | +| intent_st | Steel threads | Create, list, show, edit | +| intent_bl | Backlog wrapper | Git-safe operations, filtering | +| intent_task | Task management | Thread linking, status tracking | +| intent_status | Status sync | Task completion analysis | +| intent_bootstrap | Global setup | First-time configuration | +| intent_doctor | Diagnostics | Issue detection and fixes | +| intent_upgrade | Migration | Any version to v2.0.0 | + +### 4.3.3 Help System [AS-BUILT] + +Intent v2.0.0 provides comprehensive help: + +``` +intent help # General help +intent help <command> # Command-specific help +intent <command> -h # Quick help +``` + +#### Help Implementation + +- Embedded help in each command script +- Consistent format across all commands +- Examples included for common usage +- Version information 
displayed + +### 4.3.4 Configuration Loading [AS-BUILT] + +The configuration system loads settings in order: + +```bash +# From intent_config +load_intent_config() { + # 1. Set defaults + INTENT_DIR="intent" + BACKLOG_DIR="backlog" + + # 2. Find project root + PROJECT_ROOT=$(find_project_root) + + # 3. Load global config + parse_json "~/.config/intent/config.json" + + # 4. Load local config (overrides global) + parse_json ".intent/config.json" + + # 5. Apply environment variables (highest priority) + [ -n "$INTENT_DIR" ] && INTENT_DIR="$INTENT_DIR" +} +``` + +The help system uses markdown files in a `.help` directory: + +``` +.help/ +├── init.help.md +├── st.help.md +└── ... +``` + +Each help file follows a standardized format with sections for short description, detailed description, and usage information. + +## 4.4 Process Guidelines + +### 4.4.1 Steel Thread Workflow + +The steel thread workflow follows these steps: + +1. **Creation**: Developer creates a new steel thread +2. **Planning**: Developer defines objective, context, and approach +3. **Implementation**: Developer implements tasks with LLM assistance +4. **Documentation**: LLM and developer document implementation details +5. **Completion**: Developer marks the steel thread as complete + +### 4.4.2 LLM Collaboration Model + +The LLM collaboration model defines how developers work with LLMs: + +1. **Context Setting**: Share relevant project documents with the LLM +2. **Task Description**: Clearly describe the current task +3. **Interactive Development**: Iteratively work with the LLM to develop solutions +4. **Documentation**: Have the LLM update documentation as work progresses +5. 
**Context Preservation**: Capture key information for future sessions + +## 4.5 Data Flow + +### 4.5.1 Command Data Flow + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ User Input │────►│ STP Command │────►│ Subcommand │ +└─────────────┘ └─────────────┘ └──────┬──────┘ + │ + ▼ + ┌─────────────┐ + │ Project │ + │ Documents │ + └─────────────┘ +``` + +### 4.5.2 Document Update Flow + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Developer │────►│ LLM │────►│ Updated │ +│ Input │ │ │ │ Documents │ +└─────────────┘ └─────────────┘ └─────────────┘ +``` + +## 4.6 Error Handling + +STP implements error handling at multiple levels: + +1. **Command Validation**: Validate input parameters and provide clear error messages +2. **Execution Validation**: Check for required files and directories before operations +3. **Status Reporting**: Provide clear success/failure indicators for operations +4. **Recovery Guidance**: Suggest recovery steps when errors occur + +## 4.7 Security Considerations + +STP addresses security through: + +1. **No External Dependencies**: Minimizing attack surface through self-contained implementation +2. **File Permission Management**: Ensuring appropriate permissions for created files +3. **Input Validation**: Sanitizing user input to prevent script injection +4. **No Sensitive Data**: Avoiding storage of credentials or sensitive information + +## 4.8 Integration Implementations [AS-BUILT] + +### 4.8.1 Enhanced Backlog.md Integration + +Intent v2.0.0 provides enhanced Backlog.md integration with configurable filtering and improved git safety. + +#### Command Implementations [AS-BUILT] + +**1. 
Backlog Wrapper (`intent_bl`)** + +The `intent bl` command provides enhanced functionality: +- Automatic `--plain` flag for git safety +- Configurable status filtering via `backlog_list_status` +- `--all` flag to override filtering +- Consistent error handling + +```bash +# Key wrapper behaviors +intent bl list → backlog task list --plain [filtered] +intent bl list --all → backlog task list --plain +intent bl board → backlog board --plain +intent bl create → backlog task create [with prefix] +``` + +**Configuration:** +```json +{ + "backlog_list_status": "todo" // Default filter +} +``` + +**2. Task Management (`intent_task`)** + +The `intent task` command links steel threads to Backlog tasks: + +```bash +intent task create ST#### "title" # Creates with ST prefix +intent task list ST#### # Lists thread's tasks +intent task count ST#### # Task completion stats +``` + +**3. Status Synchronisation (`intent_status`)** + +The `intent status` command synchronizes thread and task states: + +```bash +intent status show ST#### # Display status comparison +intent status sync ST#### # Update thread from tasks +intent status check # Project-wide status report +``` + +Status mapping rules: +- All tasks in draft/none → Steel thread: "Not Started" +- Any task in todo/in-progress → Steel thread: "In Progress" +- All tasks done/archived → Steel thread: "Completed" +- Manual override for "On Hold" and "Cancelled" + +**4. Migration Tool (`stp_migrate`)** + +The `stp migrate` command converts embedded task lists to Backlog: + +```bash +stp migrate <ST####> # Migrate specific thread +stp migrate --all-active # Migrate all active threads +stp migrate --dry-run # Preview migration +``` + +Migration process: +1. Parse markdown checkboxes from steel thread +2. Create Backlog tasks with appropriate status +3. Update steel thread to reference Backlog +4. 
Preserve completion status + +#### Naming Conventions + +Tasks linked to steel threads follow strict naming: + +``` +ST#### - <task description> +``` + +Examples: +- `ST0014 - Create directory structure` +- `ST0014 - Update command implementations` +- `ST0014 - Add integration tests` + +This convention enables: +- Automatic linking between systems +- Filtering and grouping operations +- Status synchronisation + +#### File Structure Integration [AS-BUILT] + +``` +<project_root>/ +├── .intent/ +│ └── config.json # Intent configuration +├── intent/ +│ └── st/ +│ ├── ST0001/ # Steel thread directory +│ │ ├── info.md +│ │ └── tasks.md # Links to Backlog +│ └── ST0002/ +└── backlog/ + ├── Backlog.md # Main backlog file + ├── tasks/ # Task files + │ ├── task-001.md # ST0001 - Task title + │ └── task-002.md + └── ... +``` + +Key principles: +- Complete separation of STP and Backlog directories +- No file conflicts or overlaps +- Each system maintains its own structure + +#### Workflow Integration Patterns + +**1. New Feature Development** + +```bash +# 1. Create steel thread for high-level planning +stp st new "Implement user authentication" +# Output: Created ST0015 + +# 2. Create implementation tasks +stp task create ST0015 "Design auth database schema" +stp task create ST0015 "Implement login endpoint" +stp task create ST0015 "Create registration flow" +stp task create ST0015 "Add session management" +stp task create ST0015 "Write integration tests" + +# 3. Work through tasks +stp bl board # View Kanban board +backlog task edit <id> --status in-progress + +# 4. Sync status back to steel thread +stp status sync ST0015 +``` + +**2. Research and Design** + +```bash +# 1. Create steel thread for research +stp st new "Research caching strategies" + +# 2. 
Create investigation tasks +stp task create ST0016 "Review Redis capabilities" +stp task create ST0016 "Benchmark Memcached performance" +stp task create ST0016 "Evaluate in-memory options" +stp task create ST0016 "Document recommendations" + +# 3. Track progress +stp task list ST0016 +``` + +**3. Bug Fix Workflow** + +```bash +# 1. Create steel thread for bug +stp st new "Fix authentication timeout issue" + +# 2. Create diagnostic and fix tasks +stp task create ST0017 "Reproduce timeout issue" +stp task create ST0017 "Debug session handling" +stp task create ST0017 "Implement fix" +stp task create ST0017 "Add regression test" + +# 3. Fast status check +stp status show ST0017 +``` + +#### Error Handling + +The integration includes specific error handling: + +1. **Missing Backlog Installation**: Clear message with installation instructions +2. **Git Fetch Errors**: Automatically prevented with `--plain` flag +3. **Invalid Steel Thread IDs**: Validation before task creation +4. **Status Conflicts**: Warning when manual status doesn't match tasks + +#### Testing Infrastructure + +Integration tests are provided in: +- `stp/tests/task/task_test.bats` - Task command tests +- `stp/tests/status/status_test.bats` - Status synchronisation tests +- `stp/tests/migrate/migrate_test.bats` - Migration tests +- `stp/tests/backlog/backlog_test.bats` - Wrapper command tests + +Test coverage includes: +- Command functionality +- Error conditions +- Edge cases +- Integration workflows + +### 4.8.2 Migration System [AS-BUILT v2.0.0] + +Intent v2.0.0 includes comprehensive migration support for upgrading from any STP version: + +#### Migration Command + +```bash +intent upgrade [--backup-dir DIR] +``` + +#### Migration Process + +1. **Version Detection** + - Checks for .stp-config (YAML) or .intent/config.json + - Identifies version from 0.0.0 to 1.2.1 + +2. **Backup Creation** + - Creates timestamped backup directory + - Preserves entire project state + +3. 
**Structure Migration** + ``` + stp/prj/st/ST####.md → intent/st/ST####/info.md + stp/eng/ → intent/docs/eng/ + stp/usr/ → intent/docs/usr/ + stp/llm/ → intent/llm/ + ``` + +4. **Configuration Conversion** + ```yaml + # Old YAML (.stp-config) + author: username + editor: vim + ``` + + ```json + # New JSON (.intent/config.json) + { + "version": "2.0.0", + "author": "username", + "editor": "vim", + "intent_dir": "intent", + "backlog_dir": "backlog" + } + ``` + +5. **YAML Frontmatter Fix** + - Handles files without frontmatter + - Preserves existing content + +### 4.8.3 Bootstrap System [AS-BUILT v2.0.0] + +Global Intent setup for first-time users: + +```bash +intent bootstrap [--force] +``` + +#### Bootstrap Features + +1. **INTENT_HOME Detection** + - Automatically finds installation + - Validates directory structure + +2. **Global Configuration** + - Creates ~/.config/intent/config.json + - Sets default values + +3. **Shell Integration** + - Provides PATH setup instructions + - Detects shell type (bash/zsh/fish) + +4. **Validation** + - Checks all commands accessible + - Verifies jq availability + +### 4.8.4 Diagnostic System [AS-BUILT v2.0.0] + +Configuration diagnostics and automatic fixes: + +```bash +intent doctor [--fix] [--verbose] +``` + +#### Diagnostic Checks + +1. **Project Detection** + - Validates .intent/config.json + - Checks directory structure + +2. **Configuration Validation** + - JSON syntax verification + - Required field checks + - Version compatibility + +3. **Directory Structure** + - Ensures intent/ exists + - Validates backlog/ setup + +4. **Dependencies** + - jq availability + - Command accessibility + +#### Automatic Fixes (--fix) + +- Creates missing directories +- Initializes config files +- Fixes JSON formatting +- Updates version fields + +## 4.9 AS-BUILT Summary + +Intent v2.0.0 represents a complete implementation of the design with significant enhancements: + +1. 
**Unified Command Structure**: All commands follow intent_* pattern +2. **JSON Configuration**: Hierarchical config with validation +3. **Enhanced Integration**: Backlog.md with status filtering +4. **User Experience**: Bootstrap, doctor, and upgrade commands +5. **Self-Hosting**: Intent is developed using Intent itself diff --git a/intent/eng/tpd/5_implementation_strategy.md b/intent/eng/tpd/5_implementation_strategy.md new file mode 100644 index 0000000..76c3311 --- /dev/null +++ b/intent/eng/tpd/5_implementation_strategy.md @@ -0,0 +1,219 @@ +--- +verblock: "09 Jul 2025:v0.2: Matthew Sinclair - Updated for steel thread directory structure" +stp_version: 2.0.0 +--- +# 5. Implementation Strategy + +[index](<./technical_product_design.md>) + +## 5.1 Development Approach + +The Steel Thread Process (STP) will be developed using its own methodology - we will use steel threads to build the STP system itself. This meta-approach allows us to validate the process while creating it. + +The implementation will proceed in phases: + +1. **Foundation Phase**: Create core directory structure, basic templates, and essential scripts +2. **Functionality Phase**: Implement all command-line tools and complete templates +3. **Documentation Phase**: Create comprehensive documentation and guides +4. **Testing Phase**: Test in various environments and with different projects +5. 
**Refinement Phase**: Address feedback and optimize the system + +## 5.2 Steel Threads + +The STP system will be implemented through the following steel threads: + +| ID | Title | Description | +|----------------------------------|------------------------|--------------------------------------------------------------| +| [ST0009](../../prj/st/ST0009/) | Process Refinement | Refine overall process based on experience | +| [ST0008](../../prj/st/ST0008/) | LLM Integration | Create LLM preamble and canned prompts | +| [ST0007](../../prj/st/ST0007/) | User Documentation | Create user, reference, and deployment guides | +| [ST0006](../../prj/st/ST0006/) | Help System | Implement the help documentation system | +| [ST0005](../../prj/st/ST0005/) | Initialization Command | Implement project initialization | +| [ST0004](../../prj/st/ST0004/) | Steel Thread Commands | Implement commands for steel thread management | +| [ST0003](../../prj/st/ST0003/) | Template System | Create all document templates | +| [ST0002](../../prj/st/ST0002/) | Core Script Framework | Implement the main `stp` script and command dispatching | +| [ST0001](../../prj/st/ST0001/) | Directory Structure | Create the initial directory structure and placeholder files | + +## 5.3 Task Breakdown + +### ST0001: Directory Structure + +[ST0001](../../prj/st/ST0001/) + +- Create root level directories +- Create subdirectories for each component +- Create placeholder files for templates + +### ST0002: Core Script Framework + +[ST0002](../../prj/st/ST0002/) + +- Implement main `stp` script with command dispatching +- Implement environment variable handling +- Implement error handling framework +- Create script templates + +### ST0003: Template System + +[ST0003](../../prj/st/ST0003/) + +- Create templates for project documents + - Work in progress template + - Journal template + - Steel thread templates +- Create templates for engineering documents + - Technical product design templates +- Create templates for user 
documents + - User guide template + - Reference guide template + - Deployment guide template +- Create templates for LLM documents + - LLM preamble template + +### ST0004: Steel Thread Commands + +[ST0004](../../prj/st/ST0004/) + +- Implement `stp st new` command +- Implement `stp st done` command +- Implement `stp st list` command +- Implement steel thread status tracking + +### ST0005: Initialization Command + +[ST0005](../../prj/st/ST0005/) + +- Implement `stp init` command +- Implement template copying +- Implement directory creation +- Implement configuration initialization + +### ST0006: Help System + +[ST0006](../../prj/st/ST0006/) + +- Create help documentation structure +- Implement `stp help` command +- Create help content for all commands + +### ST0007: User Documentation + +[ST0007](../../prj/st/ST0007.md) + +- Create user guide content +- Create reference guide content +- Create deployment guide content + +### ST0008: LLM Integration + +[ST0008](../../prj/st/ST0008.md) + +- Create LLM preamble content +- Create canned prompts for common tasks +- Implement prompt management + +### ST0009: Process Refinement + +[ST0009](../../prj/st/ST0009.md) + +- Review and refine overall process +- Address feedback from earlier stages +- Optimize workflows + +## 5.4 Dependencies + +The implementation dependencies are as follows: + +``` +ST0001 ──► ST0002 ──► ST0004 ──► ST0007 + │ │ │ │ + │ │ │ ▼ + │ │ │ ST0009 + │ │ ▼ + │ │ ST0005 + │ │ + │ ▼ + │ ST0006 + │ + ▼ +ST0003 ────────────────► ST0008 +``` + +## 5.5 Timeline + +| Phase | Steel Threads | Timeline | +|-------|---------------|----------| +| Foundation | ST0001, ST0002, ST0003 | Week 1 | +| Functionality | ST0004, ST0005, ST0006 | Week 2 | +| Documentation | ST0007, ST0008 | Week 3 | +| Refinement | ST0009 | Week 4 | + +## 5.6 Environment Setup + +The development environment requires: + +- POSIX-compliant shell (bash, zsh) +- Git for version control +- Text editor with markdown support +- LLM access for 
assistance (e.g., Claude Code) + +## 5.7 Testing Strategy + +Testing will include: + +1. **Unit Testing**: Manual testing of individual commands +2. **Integration Testing**: Testing workflows with multiple commands +3. **Environment Testing**: Testing in different shell environments +4. **Project Testing**: Testing with sample projects +5. **LLM Testing**: Testing interaction with different LLMs + +## 5.8 Implementation Risks and Mitigations + +| Risk | Impact | Likelihood | Mitigation | +|-------------------------------------|--------|------------|--------------------------------------------------------------------------------| +| Shell script compatibility issues | High | Medium | Stick to POSIX-compatible features; test across environments | +| Complex workflows becoming unwieldy | Medium | Medium | Focus on simplicity; implement only essential functionality | +| Template maintenance overhead | Medium | Low | Design templates for minimal maintenance; use variables where appropriate | +| LLM integration challenges | High | Medium | Focus on general principles; provide platform-specific options where necessary | +| Documentation becoming outdated | Medium | High | Automate documentation updates; make updating easy | + +## 5.9 AS-BUILT Updates + +### 5.9.1 Actual Implementation Flow + +Intent v2.0.0 was developed through organic growth: + +``` +Phase 1: Foundation (v0.0.0) +ST0001 → ST0002 → ST0003 → ST0004 → ST0005 → ST0006 + +Phase 2: Enhancement (v1.0.0 - v1.2.1) +ST0007 → ST0008 → ST0009 → ST0010 → ST0011 → ST0012 + +Phase 3: Rebrand (v2.0.0) +ST0013 (blog) → ST0014 → ST0015 → ST0016 (migration) +``` + +### 5.9.2 Key Implementation Achievements + +1. **Self-Hosting Success**: Intent built using Intent methodology +2. **Test Coverage**: 86 tests covering all critical functionality +3. **Migration Tools**: Comprehensive upgrade from any version +4. **User Experience**: Bootstrap, doctor, and upgrade commands +5. 
**Documentation**: 7-part blog series and updated TPD + +### 5.9.3 Lessons Learned + +1. **JSON > YAML**: Better validation and hierarchy support +2. **Flattened Structure**: Easier navigation than nested directories +3. **Git Safety**: Wrapper commands prevent common errors +4. **Progressive Enhancement**: Core functionality first, then UX +5. **Self-Documentation**: Using the tool to build itself ensures accuracy + +### 5.9.4 Future Development + +Intent v2.0.0 provides a solid foundation for: +- Community contributions +- Enterprise adoption +- Educational use +- AI/LLM integration enhancements diff --git a/intent/eng/tpd/6_deployment_and_operations.md b/intent/eng/tpd/6_deployment_and_operations.md new file mode 100644 index 0000000..197a553 --- /dev/null +++ b/intent/eng/tpd/6_deployment_and_operations.md @@ -0,0 +1,358 @@ +--- +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Updated for Intent v2.1.0" +intent_version: 2.1.0 +--- +# 6. Deployment and Operations [AS-BUILT] + +[index](<./technical_product_design.md>) + +## 6.1 Installation [AS-BUILT] + +Intent v2.1.0 provides multiple installation methods with enhanced setup: + +### 6.1.1 Global Installation with Bootstrap + +The recommended approach uses the bootstrap command: + +```bash +# Clone the Intent repository +git clone https://github.com/matthewsinclair/intent.git ~/intent + +# Run bootstrap for automatic setup +cd ~/intent +./bin/intent bootstrap + +# Follow the instructions to add to PATH: +export INTENT_HOME=~/intent +export PATH=$PATH:$INTENT_HOME/bin +``` + +### 6.1.2 Manual Global Installation + +```bash +# Clone repository +git clone https://github.com/matthewsinclair/intent.git ~/intent + +# Add to shell profile (.bashrc, .zshrc, etc.) 
+echo 'export INTENT_HOME=~/intent' >> ~/.bashrc +echo 'export PATH=$PATH:$INTENT_HOME/bin' >> ~/.bashrc + +# Create global config +mkdir -p ~/.config/intent +echo '{ + "author": "Your Name", + "editor": "vim" +}' > ~/.config/intent/config.json + +# Reload shell +source ~/.bashrc +``` + +### 6.1.3 Project-Specific Installation + +Intent can be installed per-project: + +```bash +# From your project directory +git clone https://github.com/matthewsinclair/intent.git .intent-install + +# Create local alias +alias intent='./.intent-install/bin/intent' + +# Or add to PATH for this project +export PATH=$PATH:$(pwd)/.intent-install/bin +``` + +### 6.1.4 System Requirements + +- POSIX-compliant shell (bash 3.2+, zsh) +- Git for version control +- jq for JSON parsing (required) +- Optional: Backlog.md for task management + +## 6.2 Project Initialization [AS-BUILT] + +Intent v2.1.0 provides streamlined project setup: + +```bash +# Navigate to project directory +cd my-project + +# Initialize Intent +intent init "Project Name" +``` + +This creates: +- `.intent/config.json` - Project configuration +- `intent/` - Main directory structure +- `intent/st/` - Steel threads directory +- `intent/wip.md` - Work in progress +- `CLAUDE.md` - LLM guidelines + +### 6.2.1 Configuration During Init + +The init command prompts for: +- Project name +- Author name (defaults to $USER) +- Editor preference (defaults to $EDITOR) +- Backlog directory (defaults to "backlog") + +## 6.3 Configuration [AS-BUILT] + +Intent v2.1.0 uses hierarchical JSON configuration: + +### 6.3.1 Configuration Hierarchy + +1. **Environment Variables** (highest priority) + - `INTENT_HOME` - Installation directory + - `AUTHOR` - Default author + - `EDITOR` - Text editor + - `INTENT_*` - Override any config value + +2. 
**Local Project Config** (`.intent/config.json`) + ```json + { + "version": "2.1.0", + "project_name": "My Project", + "author": "username", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_dir": "backlog", + "intent_dir": "intent", + "backlog_list_status": "todo" + } + ``` + +3. **Global User Config** (`~/.config/intent/config.json`) + ```json + { + "author": "Your Name", + "editor": "vim", + "backlog_list_status": "wip" + } + ``` + +4. **Built-in Defaults** + +### 6.3.2 Configuration Management + +```bash +# Check configuration +intent doctor + +# Fix configuration issues +intent doctor --fix + +# View effective configuration +intent config show # (if implemented) +``` + +## 6.4 Operations [AS-BUILT] + +### 6.4.1 Creating Steel Threads + +```bash +# Create a new steel thread +intent st new "Implement Feature X" +# Creates: intent/st/ST####/info.md +``` + +Features: +- Auto-increments thread ID +- Creates directory structure +- Populates info.md template +- Optional: Creates design.md, tasks.md + +### 6.4.2 Working with Steel Threads + +```bash +# List all steel threads +intent st list + +# List by status +intent st list --status "In Progress" + +# Show thread contents +intent st show ST0001 +intent st show ST0001 design # Show specific file + +# Edit thread files +intent st edit ST0001 # Edit info.md +intent st edit ST0001 tasks # Edit tasks.md + +# Task integration +intent task create ST0001 "Implement login" +intent task list ST0001 +intent status show ST0001 +intent status sync ST0001 +``` + +### 6.4.3 Enhanced Backlog Integration + +```bash +# List tasks (respects backlog_list_status config) +intent bl list + +# List all tasks regardless of status +intent bl list --all + +# Create task linked to thread +intent bl create "ST0001 - Implement feature" + +# Mark task complete +intent bl done task-123 +``` + +### 6.4.4 Migration from STP + +```bash +# Upgrade any STP/Intent version to Intent v2.1.0 +intent upgrade + +# Custom backup directory +intent 
upgrade --backup-dir ./pre-v2-backup +``` + +### 6.4.5 LLM Integration + +```bash +# Display usage rules for LLMs +intent llm usage_rules + +# Create symlink for LLM access +intent llm usage_rules --symlink + +# Access project guidelines +cat CLAUDE.md +``` + +## 6.5 Maintenance [AS-BUILT] + +### 6.5.1 Diagnostics + +```bash +# Check for issues +intent doctor + +# Auto-fix problems +intent doctor --fix + +# Verbose output +intent doctor --verbose +``` + +Doctor checks: +- Configuration validity +- Directory structure +- JSON syntax +- Dependencies (jq) +- Version compatibility + +### 6.5.2 Updating Intent + +```bash +# Update global installation +cd $INTENT_HOME +git pull + +# Re-run bootstrap if needed +intent bootstrap --force +``` + +### 6.5.3 Backup and Recovery + +Intent documents should be version controlled: + +```bash +# Add Intent files to git +git add .intent/ intent/ CLAUDE.md +git commit -m "Update Intent documentation" + +# Exclude Backlog.md (has own git repo) +echo "backlog/" >> .gitignore +``` + +### 6.5.4 Testing + +```bash +# Run all tests +cd $INTENT_HOME +./tests/run_tests.sh + +# Run specific test suite +./tests/run_tests.sh tests/unit/st_commands.bats + +# Verbose output +./tests/run_tests.sh -v +``` + +## 6.6 Migration Guide [AS-BUILT] + +### 6.6.1 Migrating from STP/Intent to Intent v2.1.0 + +```bash +# 1. Check current version +ls -la .stp-config stp/ + +# 2. Run upgrade +intent upgrade + +# 3. Verify migration +intent doctor +ls -la .intent/ intent/ + +# 4. 
Update shell aliases +alias stp=intent # Temporary compatibility +``` + +### 6.6.2 Migration Changes + +| Old (STP) | New (Intent v2.1.0) | +|-----------|--------------------| +| stp/* | intent/* | +| .stp-config | .intent/config.json | +| stp commands | intent commands | +| YAML config | JSON config | +| Nested dirs | Flattened structure | + +## 6.7 Troubleshooting [AS-BUILT] + +Common issues and solutions: + +| Issue | Solution | +|-------|----------| +| Command not found | Run `intent bootstrap` and add to PATH | +| jq not found | Install jq: `brew install jq` or `apt install jq` | +| Permission denied | Run `chmod +x $INTENT_HOME/bin/*` | +| Config errors | Run `intent doctor --fix` | +| Migration fails | Check backup, run with `--no-backup` if safe | +| Tests fail | Ensure bash 3.2+ and BATS installed | + +### 6.7.1 Debug Mode + +```bash +# Enable debug output +export INTENT_DEBUG=1 +intent st list + +# Verbose help +intent help --verbose +``` + +### 6.7.2 Support Resources + +- Blog series: `docs/blog/` +- GitHub issues: Project repository +- CLAUDE.md: Project-specific help +- This TPD: Technical reference + +## 6.8 AS-BUILT Summary + +Intent v2.1.0 deployment features: + +1. **Bootstrap**: Automated global setup +2. **Doctor**: Diagnostics and fixes +3. **Upgrade**: Migration from any version +4. **JSON Config**: Hierarchical settings +5. **Enhanced UX**: Better error messages +6. **Self-Hosting**: Proven through use diff --git a/intent/eng/tpd/7_technical_challenges_and_mitigations.md b/intent/eng/tpd/7_technical_challenges_and_mitigations.md new file mode 100644 index 0000000..1ccb47d --- /dev/null +++ b/intent/eng/tpd/7_technical_challenges_and_mitigations.md @@ -0,0 +1,205 @@ +--- +verblock: "17 Jul 2025:v2.0.0: Matthew Sinclair - Updated for Intent v2.0.0 (As-Built)" +intent_version: 2.0.0 +--- +# 7. 
Technical Challenges and Mitigations [AS-BUILT] + +[index](<./technical_product_design.md>) + +## 7.1 LLM Context Window Management + +### 7.1.1 Challenge + +LLMs have finite context windows, limiting the amount of information that can be processed in a single interaction. This constraint can impact the LLM's ability to understand the full project context. + +### 7.1.2 Mitigation Strategies + +- **Document Segmentation**: Break documentation into logical segments that fit within context windows +- **Strategic Information Sharing**: Provide only relevant documents for specific tasks +- **Context Summarization**: Include brief summaries at the start of documents +- **Cross-Referencing**: Use clear references between documents to help the LLM locate related information +- **Progressive Disclosure**: Share information in stages as needed for specific tasks + +## 7.2 Shell Script Portability + +### 7.2.1 Challenge + +Shell scripts may behave differently across operating systems and shell environments, leading to inconsistent user experiences. + +### 7.2.2 Mitigation Strategies + +- **POSIX Compliance**: Use only POSIX-compliant shell features +- **Environment Detection**: Detect environment and adapt behavior when necessary +- **Minimal Dependencies**: Avoid relying on non-standard utilities +- **Extensive Testing**: Test across multiple environments +- **Clear Error Messages**: Provide clear error messages for environment-specific issues + +## 7.3 Documentation Maintenance + +### 7.3.1 Challenge + +As projects evolve, documentation can become outdated, reducing its value for both humans and LLMs. 
+ +### 7.3.2 Mitigation Strategies + +- **LLM-Assisted Updates**: Leverage LLMs to help maintain documentation +- **Version Tracking**: Include version information in documents +- **Regular Reviews**: Incorporate documentation review into the development process +- **Automation**: Automate aspects of documentation management +- **Simplified Structure**: Keep documentation structure simple to minimize maintenance overhead + +## 7.4 LLM Platform Differences + +### 7.4.1 Challenge + +Different LLM platforms have varying capabilities, interfaces, and limitations, complicating consistent integration. + +### 7.4.2 Mitigation Strategies + +- **Platform-Agnostic Design**: Focus on principles that work across LLM platforms +- **Configurable Instructions**: Allow customization of LLM instructions based on platform +- **Feature Detection**: Provide options based on LLM capabilities +- **Minimal Assumptions**: Make minimal assumptions about LLM behavior +- **Clear Guidelines**: Provide clear guidelines for different LLM platforms + +## 7.5 Process Adoption Barriers + +### 7.5.1 Challenge + +Developers may resist adopting new processes, especially those requiring significant changes to workflow. + +### 7.5.2 Mitigation Strategies + +- **Incremental Value**: Intent provides value even with partial adoption +- **Low Friction**: Bootstrap command minimizes setup effort +- **Clear Benefits**: Blog series communicates benefits clearly +- **Integration Flexibility**: Works alongside existing workflows +- **Good Documentation**: Comprehensive TPD and blog posts + +## 7.6 Template Management + +### 7.6.1 Challenge + +Managing and updating templates across multiple projects can become complex. 
+ +### 7.6.2 Mitigation Strategies + +- **Template Versioning**: Clear version information for templates +- **Synchronization Tools**: Tools to sync templates between projects +- **Project-Specific Customization**: Allow project-specific template customization +- **Minimal Dependencies**: Minimize dependencies between templates +- **Clear Structure**: Maintain a clear, logical template structure + +## 7.7 Scale to Large Projects + +### 7.7.1 Challenge + +As projects grow, the volume of documentation and steel threads may become unwieldy. + +### 7.7.2 Mitigation Strategies + +- **Hierarchical Organization**: Organize documentation hierarchically +- **Search Support**: Support for searching documentation +- **Modular Approach**: Break large projects into modules +- **Linking and References**: Clear linking between related content +- **Archive Mechanisms**: Methods to archive completed steel threads + +## 7.8 LLM Token Optimization + +### 7.8.1 Challenge + +Inefficient use of LLM tokens can lead to higher costs and slower interactions. + +### 7.8.2 Mitigation Strategies + +- **Concise Documentation**: Focus on clarity and conciseness +- **Strategic Information Sharing**: Share only what's needed for specific tasks +- **Template Optimization**: Design templates for token efficiency +- **Progressive Disclosure**: Share information in stages as needed +- **Reuse Context**: Maintain context across related interactions + +## 7.9 Version Control Integration + +### 7.9.1 Challenge + +Integrating STP documentation with version control systems may lead to conflicts or management challenges. 
+ +### 7.9.2 Mitigation Strategies + +- **VCS Agnostic Design**: Design for compatibility with various VCS +- **Clear Ignore Patterns**: Provide appropriate .gitignore patterns +- **Conflict Resolution Guidelines**: Guidelines for resolving documentation conflicts +- **Atomic Updates**: Encourage atomic documentation updates +- **Merge Strategies**: Recommend appropriate merge strategies for documentation + +## 7.10 Security Considerations + +### 7.10.1 Challenge + +Documentation may inadvertently contain sensitive information that should not be shared with LLMs. + +### 7.10.2 Mitigation Strategies + +- **Sensitive Information Guidelines**: Clear guidelines for what should not be included +- **Credential Management**: Never include credentials in documentation +- **Isolation of Concerns**: Separate sensitive and non-sensitive information +- **Review Process**: Review for sensitive information before sharing +- **Redaction Patterns**: Patterns for redacting sensitive information + +## 7.11 AS-BUILT: v2.0.0 Specific Challenges + +### 7.11.1 YAML Frontmatter Migration Bug + +**Challenge**: Files without YAML frontmatter caused migration failures. + +**Resolution**: Modified `convert_yaml_frontmatter` to handle edge case: +```bash +if ! head -1 "$file" | grep -q "^---$"; then + cp "$file" "$temp_file" # Just copy if no frontmatter + return 0 +fi +``` + +### 7.11.2 Test Migration Complexity + +**Challenge**: Lost ~100 tests during v2.0.0 migration. + +**Resolution**: +- Focused on core functionality (86 tests) +- Documented lost tests for future recovery +- Prioritized critical path testing + +### 7.11.3 Configuration Filtering + +**Challenge**: `intent bl list` not respecting configuration. + +**Resolution**: +- Added `backlog_list_status` to config loading +- Implemented `--all` flag override +- Test-driven development approach + +### 7.11.4 Blog Post Recovery + +**Challenge**: Blog posts accidentally deleted during cleanup. 
+ +**Resolution**: +- Restored from git history (commit b65b8c9) +- Updated all STP references to Intent +- Fixed internal links between posts + +### 7.11.5 Directory Structure Flattening + +**Challenge**: Complex migration from nested to flat structure. + +**Resolution**: +- Comprehensive upgrade script +- Automatic backup creation +- Clear migration path documentation + +## 7.12 Lessons Learned + +1. **Self-Hosting Validates Design**: Using Intent to build Intent exposed issues early +2. **Test Coverage Essential**: BATS tests prevented regressions during migration +3. **Git History Invaluable**: Ability to restore lost files saved the project +4. **User Experience Matters**: Bootstrap, doctor, upgrade commands improve adoption +5. **Documentation as Code**: Blog series and TPD updates tracked in git diff --git a/intent/eng/tpd/8_appendices.md b/intent/eng/tpd/8_appendices.md new file mode 100644 index 0000000..8ac07fc --- /dev/null +++ b/intent/eng/tpd/8_appendices.md @@ -0,0 +1,263 @@ +--- +verblock: "17 Jul 2025:v2.0.0: Matthew Sinclair - Updated for Intent v2.0.0 (As-Built)" +intent_version: 2.0.0 +--- +# 8. Appendices + +[index](<./technical_product_design.md>) + +## 8.1 Glossary + +| Term | Definition | +|----------------|--------------------------------------------------------------------------------------------------| +| LLM | Large Language Model - An AI system capable of understanding and generating human language, | +| | such as Claude, GPT, etc. 
| +| Steel Thread | A self-contained unit of work that represents a logical piece of functionality to be implemented | +| Context Window | The amount of text an LLM can process in a single interaction | +| Intent | The system described in this document - captures and preserves development intention | +| STP | Steel Thread Process - The original name for Intent (pre-v2.0.0) | +| TPD | Technical Product Design - A comprehensive technical specification document | +| Backlog.md | Task management system integrated with Intent for fine-grained task tracking | +| Bootstrap | Automated setup process for Intent global installation | + +## 8.2 Command Reference [AS-BUILT] + +### 8.2.1 Primary Commands + +| Command | Description | Usage | +|---------|-------------|-------| +| `intent init` | Initialize Intent in a project | `intent init "Project Name"` | +| `intent st` | Manage steel threads | `intent st new/list/show/edit` | +| `intent bl` | Enhanced Backlog.md wrapper | `intent bl list/create/done` | +| `intent task` | Manage tasks linked to threads | `intent task create/list/count` | +| `intent status` | Synchronize thread/task status | `intent status show/sync/check` | +| `intent bootstrap` | Global Intent setup | `intent bootstrap [--force]` | +| `intent doctor` | Diagnose configuration | `intent doctor [--fix]` | +| `intent upgrade` | Migrate from STP | `intent upgrade [--backup-dir]` | +| `intent help` | Display help | `intent help [command]` | +| `intent llm` | LLM integration | `intent llm usage_rules` | + +### 8.2.2 Configuration Schema + +```json +// .intent/config.json +{ + "version": "2.0.0", // Required: Intent version + "project_name": "string", // Required: Project name + "author": "string", // Optional: Default author + "created": "YYYY-MM-DD", // Auto-generated: Creation date + "st_prefix": "ST", // Optional: Steel thread prefix (default: ST) + "backlog_dir": "backlog", // Optional: Backlog directory (default: backlog) + "intent_dir": "intent", // Optional: 
Intent directory (default: intent) + "backlog_list_status": "todo" // Optional: Default status filter (default: todo) +} +``` + +### 8.2.3 Global Configuration + +```json +// ~/.config/intent/config.json +{ + "author": "Your Name", // Default author for all projects + "editor": "vim", // Preferred text editor + "backlog_list_status": "wip" // Global default status filter +} +``` + +### 8.2.4 Command Examples + +```bash +# Initialize new project +intent init "My Project" + +# Create and manage steel threads +intent st new "Implement OAuth2 authentication" +intent st list --status "In Progress" +intent st show ST0015 +intent st edit ST0015 design + +# Task management with Backlog integration +intent task create ST0015 "Design auth flow" +intent bl list # Filtered by config +intent bl list --all # All tasks +intent bl done task-123 + +# Status synchronization +intent status show ST0015 +intent status sync ST0015 + +# System maintenance +intent doctor --fix +intent upgrade --backup-dir ./backup +intent bootstrap --force +``` + +### 8.2.5 Environment Variables + +| Variable | Description | Default | +|----------|-------------|----------| +| `INTENT_HOME` | Intent installation directory | Required | +| `AUTHOR` | Default author name | $USER | +| `EDITOR` | Preferred text editor | vim | +| `INTENT_DEBUG` | Enable debug output | unset | +| `INTENT_*` | Override any config value | unset | + +Example usage: +```bash +export INTENT_HOME=~/intent +export INTENT_BACKLOG_LIST_STATUS=wip # Override default filter +export INTENT_DEBUG=1 # Enable debug mode +``` + +## 8.3 Template Examples + +### 8.3.1 Work in Progress Template + +```markdown +# Work In Progress + +## Current Focus +[Brief description of the current development focus] + +## Active Steel Threads +- ST####: [Brief description] +- ... + +## Upcoming Work +- [Item 1] +- ... 
+ +## Notes +[Any additional notes about the current work] +``` + +### 8.3.2 Steel Thread Template + +```markdown +# ST####: [Title] + +- **Status**: [Not Started|In Progress|Completed] +- **Created**: YYYY-MM-DD +- **Completed**: YYYY-MM-DD + +## Objective +[Clear statement of what this steel thread aims to accomplish] + +## Context +[Background information and context for this steel thread] + +## Approach +[Planned approach for implementing this steel thread] + +## Tasks +- [ ] Task 1 +- [ ] Task 2 +- ... + +## Implementation Notes +[Notes on implementation details, decisions, challenges, etc.] + +## Results +[Summary of results after completion] +``` + +## 8.4 Process Examples + +### 8.4.1 New Steel Thread Process + +1. **Identify Need**: Developer identifies a discrete piece of work +2. **Create Steel Thread**: `intent st new "Implement Feature X"` +3. **Define Objective**: Update steel thread document with clear objective +4. **Implement Tasks**: Work through tasks with LLM assistance +5. **Document Progress**: Update documentation as work progresses +6. **Complete Steel Thread**: `intent st done ST####` + +### 8.4.2 LLM Collaboration Example + +**Developer Prompt:** + +``` +I need to implement a new feature for user authentication. Let's create a new steel thread for this. +``` + +**LLM Response:** + +``` +I'll help you create a new steel thread for user authentication. Let's use Intent: + +1. First, let's create the steel thread: + $ intent st new "User Authentication Implementation" + +2. This will create ST0005 directory (assuming your last steel thread was ST0004). + +3. Let's define the objective and approach in the steel thread document... 
+``` + +## 8.5 References + +- POSIX Shell Command Language: [IEEE Std 1003.1-2017](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html) +- Markdown Guide: [Basic Syntax](https://www.markdownguide.org/basic-syntax/) +- Documentation Best Practices: [Write the Docs](https://www.writethedocs.org/guide/writing/beginners-guide-to-docs/) +- LLM Prompt Engineering: [Claude Prompt Design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design) +- Shell Scripting Guide: [Bash Hackers Wiki](https://wiki.bash-hackers.org/) + +## 8.6 Future Enhancements + +- **Web Interface**: A lightweight web interface for viewing Intent documentation +- **Notification System**: Notifications for steel thread status changes +- **Analytics**: Statistics on steel thread progress and completion +- **Integration Plugins**: Deeper integration with version control and issue tracking +- **Templating Extensions**: More sophisticated templating with variables +- **Cross-Project References**: References between related projects +- **Document Generation**: Automatic generation of summary reports +- **Collaborative Editing**: Support for collaborative editing of documents +- **AI Integration**: Enhanced LLM workflows and context management +- **Mobile Support**: Mobile-friendly documentation viewing + +## 8.7 Integration References + +### 8.7.1 Backlog.md Integration + +- **Integration Guide**: `/intent/llm/usage-rules.md#task-management-integration` - Comprehensive guide for using the integration +- **Backlog.md Documentation**: [https://github.com/MrLesk/Backlog.md](https://github.com/MrLesk/Backlog.md) +- **Integration Tests**: + - `/tests/unit/task_commands.bats` - Task management command tests + - `/tests/unit/status_commands.bats` - Status synchronisation tests + - `/tests/unit/backlog_wrapper.bats` - Wrapper command tests + - `/tests/integration/` - End-to-end tests +- **Implementation Scripts**: + - `/bin/intent_backlog` - Backlog wrapper implementation + - 
`/bin/intent_task` - Task management implementation + - `/bin/intent_status` - Status synchronisation implementation + - `/bin/intent_migrate` - Migration tool implementation + +### 8.7.2 Integration Architecture + +For technical details on the Backlog.md integration architecture, see: +- Section 3.6.1: Backlog.md Integration Architecture +- Section 4.8.1: Backlog.md Integration Implementation Details +- Blog Post: [LLM Collaboration with Intent](../../docs/blog/0004-llm-collaboration-with-intent.md) + +## 8.8 AS-BUILT Notes + +### 8.8.1 Version History + +| Version | Date | Changes | +|---------|------|---------| +| 2.0.0 | 2025-07-17 | Complete rebrand to Intent, JSON config, enhanced UX | +| 1.2.1 | 2025-07-09 | Added Backlog.md integration | +| 1.0.0 | 2025-06-15 | Initial STP release | + +### 8.8.2 Test Coverage + +- 86 tests passing (BATS framework) +- Core functionality: 100% covered +- Integration tests: Backlog.md wrapper +- Lost tests documented for recovery + +### 8.8.3 Known Limitations + +1. **Reduced test coverage**: ~100 tests lost during migration +2. **Limited error recovery**: Some edge cases need handling +3. **Documentation gaps**: Some advanced features undocumented +4. **Platform testing**: Primarily tested on macOS/Linux diff --git a/intent/eng/tpd/technical_product_design.md b/intent/eng/tpd/technical_product_design.md new file mode 100644 index 0000000..fb1e710 --- /dev/null +++ b/intent/eng/tpd/technical_product_design.md @@ -0,0 +1,115 @@ +--- +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Update for Intent v2.1.0 with agent init" +intent_version: 2.1.0 +--- +# Intent Technical Product Design v2.1.0 (As-Built) + +## Preamble to Claude + +This document is a Technical Product Design (TPD) for the Intent system (formerly known as STP - Steel Thread Process). When processing this document, please understand: + +1. This is the AS-BUILT documentation for Intent v2.1.0, reflecting the actual implementation +2. 
Intent underwent a complete rebrand from STP to Intent in July 2025 +3. The system is designed to facilitate collaboration between developers and LLMs +4. This document contains: + - Actual v2.1.0 architecture and implementation + - JSON-based configuration system + - Complete command reference for intent_* commands + - Migration tools and processes + - Lessons learned from development + +5. The code is developed through "steel threads" which are incremental implementation stages +6. Steel threads are organized as directories under intent/st/ containing: + - info.md: Main information and metadata (required) + - design.md: Design decisions and approach (optional) + - impl.md: Implementation details (optional) + - tasks.md: Task tracking or Backlog.md integration (optional) +7. The system consists of shell scripts and markdown templates +8. Configuration uses JSON format (.intent/config.json) instead of YAML +9. The system integrates with Backlog.md for task management with enhanced filtering +10. Intent is self-hosting - this project is built using Intent v2.1.0 +11. Intent v2.1.0 includes Claude Code sub-agent integration with proper initialization for enhanced AI collaboration +12. 
Key commands include: + +- `intent st list`: List all steel threads with status filtering +- `intent st new`: Create a new steel thread +- `intent st show`: Display steel thread contents +- `intent st edit`: Edit steel thread files +- `intent bl`: Enhanced Backlog.md wrapper with status filtering +- `intent task`: Manage Backlog tasks linked to steel threads +- `intent status`: Synchronize steel thread status with task completion +- `intent agents`: Manage Claude Code sub-agents for Intent projects +- `intent init`: Initialize a new Intent project +- `intent bootstrap`: Global Intent setup and configuration +- `intent doctor`: Diagnose and fix configuration issues +- `intent upgrade`: Migrate any STP/Intent version to Intent v2.1.0 +- `intent help`: Unified help system for all commands + +# Intent v2.1.0 Technical Product Design + +This document serves as the central index for the Technical Product Design (TPD) of Intent v2.1.0. The TPD has been forensically updated to reflect the actual as-built state of the system after the migration from STP to Intent and enhancement with agent initialization. Sections marked with "[AS-BUILT]" indicate deviations from the original design. + +## Table of Contents + +1. [Introduction](./1_introduction.md) +2. [Requirements](./2_requirements.md) +3. [Architecture](./3_architecture.md) +4. [Detailed Design](./4_detailed_design.md) +5. [Implementation Strategy](./5_implementation_strategy.md) +6. [Deployment and Operations](./6_deployment_and_operations.md) +7. [Technical Challenges and Mitigations](./7_technical_challenges_and_mitigations.md) +8. 
[Appendices](./8_appendices.md) + +## Agent System (v2.1.0) [AS-BUILT] + +Intent v2.1.0 includes Claude Code sub-agent integration with proper initialization, enhancing AI collaboration: + +### Architecture +- **Agent Storage**: `$INTENT_HOME/agents/` (global), `./intent/agents/` (project) +- **Installation Target**: `~/.claude/agents/` +- **Manifest Tracking**: JSON manifests track installations and checksums +- **Sync Mechanism**: File-based sync with modification detection + +### Available Agents +1. **Intent Agent**: Understands steel threads, Intent commands, and project structure +2. **Elixir Agent**: Elixir code doctor with Usage Rules and Ash/Phoenix patterns +3. **Socrates Agent**: CTO Review Mode for technical decision-making via Socratic dialog + +### Agent Commands +- `intent agents init`: Initialize agent configuration +- `intent agents list`: Show available and installed agents +- `intent agents install`: Install agents to Claude configuration +- `intent agents sync`: Update agents while preserving modifications +- `intent agents uninstall`: Remove Intent-managed agents +- `intent agents show`: Display agent details and metadata +- `intent agents status`: Check agent health and integrity + +### Integration Points +- **intent init**: Detects Claude and offers agent installation +- **intent doctor**: Includes agent health checks +- **intent upgrade**: Preserves agent directories during migration + +## Migration Notes + +Intent v2.1.0 represents a complete rebrand and restructuring from the Steel Thread Process (STP) to Intent: + +- Directory structure flattened: `stp/prj/st/` → `intent/st/` +- Commands renamed: `stp_*` → `intent_*` +- Configuration migrated: YAML → JSON +- New features: bootstrap, doctor, upgrade commands +- Enhanced Backlog.md integration with status filtering +- Self-hosting success: Intent is built using Intent + +## Current Status + +- **Version**: 2.1.0 (Enhanced Agent System with init command - July 2025) +- **Tests**: 165/165 
passing (includes 50 new agent tests) +- **Commands**: 13 primary commands including new `intent agents` +- **Agents**: 2 built-in agents (Intent, Elixir) +- **Projects Using Intent**: Intent itself (self-hosting) + +## Links + +- [Current Steel Threads](../../st/) +- [Intent Blog Series](../../../docs/blog/) +- [Migration Guide](./6_deployment_and_operations.md#migration) diff --git a/intent/llm/AGENTS.md b/intent/llm/AGENTS.md new file mode 100644 index 0000000..3f5352a --- /dev/null +++ b/intent/llm/AGENTS.md @@ -0,0 +1,118 @@ +# AGENTS.md + +## Project Overview + +This is an Intent project. See CLAUDE.md for project-specific guidelines. + +## Development Environment + +### Prerequisites +- Bash 4.0 or higher +- POSIX-compliant shell environment +- Bats testing framework + +### Setup +```bash +# Initialize Intent project +intent init + +# Install dependencies (if applicable) +``` + +## Build and Test Commands + +### Testing +```bash +# Run tests +bats tests/*.bats +``` + +### Building +No build process required. + +### Validation +```bash +# Check Intent configuration +intent doctor + +# Validate project structure +intent st list +``` + +## Code Style Guidelines + +- Shell scripts: 2-space indentation, POSIX compliance +- Markdown: Standard formatting with verblock headers +- Follow existing patterns in the codebase + +See `intent/llm/usage-rules.md` for detailed code style rules. + +## Testing Instructions + +All changes should be validated with the test suite before committing. + +1. Run the full test suite before making changes to establish baseline +2. Make your changes +3. Run tests again to ensure nothing broke +4. 
Add new tests for new functionality + +## Commit and PR Guidelines + +### Commit Messages +- Use conventional commit format when applicable +- Be descriptive about what changed and why +- Reference steel thread IDs (e.g., "ST0018: Add AGENTS.md support") + +### Pull Requests +- Include test results in PR description +- Reference related steel threads +- Update documentation alongside code changes + +## Intent-Specific Information + +### Steel Thread Process +This project uses Intent's Steel Thread Process for development: +- Work is organized into steel threads (ST####) +- Each thread is a self-contained unit of work +- View threads: `intent st list` +- Create thread: `intent st new "Title"` + +### Available Commands +```bash +intent st list # List all steel threads +intent st new "Title" # Create new steel thread +intent st show <id> # Show steel thread details +intent bl # Manage backlog (if configured) +intent doctor # Check configuration +intent agents sync # Update this AGENTS.md file +``` + +### Claude Subagents +This project has Claude Code subagents available: +```bash +intent claude subagents list # List available subagents +intent claude subagents install # Install a subagent +``` + +Available subagents: +- **ash-expert**: Modern Ash 3.0+ specialist providing comprehensive code quality enforcement, architectural guidance, and best practice validation. Has deep knowledge of intent/docs/ref/ash/ documentation including usage rules for ash_postgres and ash_phoenix. Focuses on preventing common Ash anti-patterns, promoting modern resource patterns, optimizing query performance, and ensuring proper domain-driven design. Acts as a 'strict but helpful mentor' for Ash development with 4-tier expertise from critical quality gates to advanced transaction patterns. 
+- **elixir**: Elixir code doctor with functional programming expertise and Usage Rules integration +- **intent**: Intent-aware assistant for steel threads and backlog management +- **socrates**: CTO Review Mode - Socratic dialog for technical decision-making +- **worker-bee**: Worker-Bee Driven Design specialist for any Elixir application. Conducts interactive project structure mapping, enforces WDD 6-layer architecture compliance, validates functional core purity, and scaffolds WDD-compliant code. Works with Phoenix, OTP, libraries, and any Elixir project type. + +## Security Considerations + +- Never commit sensitive information (keys, passwords, tokens) +- Review all changes for security implications +- Follow secure coding practices + +## Additional Resources + +- Project documentation: `intent/docs/` +- Steel threads: `intent/st/` +- LLM guidelines: `intent/llm/` +- Claude-specific instructions: `CLAUDE.md` + +--- +*Generated by Intent v2.3.2 on 2025-09-04* diff --git a/intent/llm/AGENTS.md.bak b/intent/llm/AGENTS.md.bak new file mode 100644 index 0000000..297e003 --- /dev/null +++ b/intent/llm/AGENTS.md.bak @@ -0,0 +1,116 @@ +# AGENTS.md + +## Project Overview + +This is an Intent project. See CLAUDE.md for project-specific guidelines. + +## Development Environment + +### Prerequisites +- Bash 4.0 or higher +- POSIX-compliant shell environment +- Bats testing framework + +### Setup +```bash +# Initialize Intent project +intent init + +# Install dependencies (if applicable) +``` + +## Build and Test Commands + +### Testing +```bash +# Run tests +bats tests/*.bats +``` + +### Building +No build process required. 
+ +### Validation +```bash +# Check Intent configuration +intent doctor + +# Validate project structure +intent st list +``` + +## Code Style Guidelines + +- Shell scripts: 2-space indentation, POSIX compliance +- Markdown: Standard formatting with verblock headers +- Follow existing patterns in the codebase + +See `intent/llm/usage-rules.md` for detailed code style rules. + +## Testing Instructions + +All changes should be validated with the test suite before committing. + +1. Run the full test suite before making changes to establish baseline +2. Make your changes +3. Run tests again to ensure nothing broke +4. Add new tests for new functionality + +## Commit and PR Guidelines + +### Commit Messages +- Use conventional commit format when applicable +- Be descriptive about what changed and why +- Reference steel thread IDs (e.g., "ST0018: Add AGENTS.md support") + +### Pull Requests +- Include test results in PR description +- Reference related steel threads +- Update documentation alongside code changes + +## Intent-Specific Information + +### Steel Thread Process +This project uses Intent's Steel Thread Process for development: +- Work is organized into steel threads (ST####) +- Each thread is a self-contained unit of work +- View threads: `intent st list` +- Create thread: `intent st new "Title"` + +### Available Commands +```bash +intent st list # List all steel threads +intent st new "Title" # Create new steel thread +intent st show <id> # Show steel thread details +intent bl # Manage backlog (if configured) +intent doctor # Check configuration +intent agents sync # Update this AGENTS.md file +``` + +### Claude Subagents +This project has Claude Code subagents available: +```bash +intent claude subagents list # List available subagents +intent claude subagents install # Install a subagent +``` + +Available subagents: +- **elixir**: Elixir code doctor with functional programming expertise and Usage Rules integration +- **intent**: Intent-aware assistant for 
steel threads and backlog management +- **socrates**: CTO Review Mode - Socratic dialog for technical decision-making + +## Security Considerations + +- Never commit sensitive information (keys, passwords, tokens) +- Review all changes for security implications +- Follow secure coding practices + +## Additional Resources + +- Project documentation: `intent/docs/` +- Steel threads: `intent/st/` +- LLM guidelines: `intent/llm/` +- Claude-specific instructions: `CLAUDE.md` + +--- +*Generated by Intent v2.2.1 on 2025-08-20* diff --git a/intent/llm/llm_preamble.md b/intent/llm/llm_preamble.md new file mode 100644 index 0000000..d39ac6b --- /dev/null +++ b/intent/llm/llm_preamble.md @@ -0,0 +1,227 @@ +--- +verblock: "20 Aug 2025:v0.3: DEPRECATED - Replaced by AGENTS.md in Intent v2.3.0" +intent_version: 2.1.0 +--- +# LLM Preamble [DEPRECATED] + +> **⚠️ DEPRECATION NOTICE**: This file is deprecated as of Intent v2.3.0. +> LLM context is now provided via AGENTS.md which follows the universal +> AGENTS.md specification. See the AGENTS.md file in this project. +> +> This file is kept for backward compatibility but will be removed in a future version. + +This document provides essential context for LLMs working with Intent projects. + +## Project Overview + +Intent (formerly STP - Steel Thread Process) is a system designed to create a structured workflow and documentation process for developers collaborating with Large Language Models. It provides: + +1. A standardized directory structure for project documentation +2. Shell scripts for managing project workflows +3. A methodology centered around "steel threads" - self-contained units of work +4. Markdown templates for documentation +5. Integration with Claude Code sub-agents for specialized assistance + +The system is intentionally lightweight, using shell scripts and markdown files to maximize portability and minimize dependencies. 
It integrates with existing development workflows and helps preserve context across development sessions with LLMs. + +## Navigation Guide + +When working with an Intent project, focus on these key documents in order: + +1. **START HERE**: `CLAUDE.md` - Project-specific guidelines and instructions +2. **NEXT**: `intent/st/` - Review steel thread directories for project history +3. **THEN**: `Backlog.md` (if exists) - Current tasks and priorities +4. **REFERENCE**: `intent/docs/` - Technical documentation + +## Key System Components + +The Intent system consists of: + +1. **Core Commands**: + - `intent st new "Title"` - Create a new steel thread + - `intent st list` - List all steel threads + - `intent st show <id>` - Show steel thread details + - `intent agents` - Manage AI agents + - `intent doctor` - Check configuration + +2. **Directory Structure**: + - `intent/` - Project artifacts (steel threads, docs, work tracking) + - `intent/st/` - Steel threads organized as directories + - `intent/docs/` - Technical documentation + - `intent/llm/` - LLM-specific guidelines + - `backlog/` - Task management (if using Backlog.md) + - `.intent/` - Configuration and metadata + +3. **Agent System**: Specialized AI assistants for domain-specific tasks + +# Intent Agent System Guide + +## Overview + +Intent integrates with Claude Code's sub-agent feature to provide specialized AI assistants. These agents extend Claude's capabilities with domain-specific knowledge and focused expertise. 
+ +## Understanding Intent Agents + +Intent agents are Claude Code sub-agents that: +- Have their own context window separate from the main conversation +- Possess specialized knowledge and system prompts +- Access a focused set of tools appropriate to their domain +- Return comprehensive results that you can use in your main workflow + +## Agent Architecture + +``` +intent agents init # Initialize agent configuration +intent agents list # Show available and installed agents +intent agents install # Install agents to ~/.claude/agents/ +intent agents sync # Update agents to latest versions +intent agents status # Check agent health +``` + +Agents are stored in: +- `$INTENT_HOME/agents/` - Global agents shipped with Intent +- `./intent/agents/` - Project-specific custom agents +- `~/.claude/agents/` - Where Claude Code reads installed agents + +## Available Intent Agents + +### intent - Intent Methodology Specialist + +**Expertise:** +- Steel thread methodology and management +- Intent command usage and best practices +- Backlog task tracking with Backlog.md +- Project structure and organization +- Migration from STP to Intent v2.x + +**Use Cases:** +- `Task(description="Create steel thread", prompt="/create-st 'User authentication system'", subagent_type="intent")` +- `Task(description="Review project structure", prompt="Analyze this project's Intent setup and suggest improvements", subagent_type="intent")` +- `Task(description="Manage backlog", prompt="Review Backlog.md and prioritize tasks", subagent_type="intent")` + +### elixir - Elixir Code Doctor + +**Expertise:** +- Elixir Usage Rules (19 best practices) +- Functional programming patterns +- Ash framework patterns and resources +- Phoenix framework best practices +- Code review and optimization + +**Use Cases:** +- `Task(description="Review Elixir code", prompt="Review this module for Usage Rules compliance", subagent_type="elixir")` +- `Task(description="Implement Ash resource", prompt="Create an Ash 
resource for user profiles with proper actions", subagent_type="elixir")` +- `Task(description="Optimize GenServer", prompt="Review this GenServer for performance and suggest improvements", subagent_type="elixir")` + +## When to Use Agents vs Main Claude + +### Use Specialized Agents When: +- Task requires deep domain knowledge (Intent methodology, Elixir patterns) +- Performing focused code reviews or audits +- Following specific methodologies or frameworks +- Task is well-bounded and doesn't require broad context +- You need expert guidance on best practices + +### Use Main Claude When: +- Task requires understanding the full project context +- Integrating multiple systems or languages +- General programming tasks +- Tasks that span multiple domains +- Exploratory work or debugging + +## Agent Invocation Patterns + +### Basic Pattern +``` +Task( + description="Short task description", + prompt="Detailed instructions for the agent", + subagent_type="agent_name" +) +``` + +### Complex Workflow Pattern +Chain multiple agents for sophisticated workflows: + +1. Use intent agent to understand project structure +2. Use main Claude to implement features +3. Use elixir agent to review and optimize code + +### Slash Command Pattern +Agents can execute custom slash commands: +``` +Task( + description="Execute command", + prompt="/check-usage-rules path/to/module.ex", + subagent_type="elixir" +) +``` + +## Best Practices + +1. **Be Specific**: Agents work best with clear, focused prompts +2. **Provide Context**: Include relevant file paths and specific requirements +3. **Batch Related Tasks**: Group similar tasks for the same agent +4. **Trust Agent Expertise**: Agents have specialized knowledge - defer to their recommendations +5. 
**Use Proactively**: Don't wait for users to ask - use agents when appropriate + +## Creating Custom Project Agents + +Projects can define custom agents in `./intent/agents/`: + +```yaml +--- +name: security-reviewer +tools: + - Read + - Grep + - Bash +--- + +You are a security specialist for this project... +``` + +Install with: `intent agents install ./intent/agents/security-reviewer.md` + +## Integration with Intent Workflow + +1. **During intent init**: Offers to install agents if Claude Code is detected +2. **With steel threads**: Intent agent understands ST methodology +3. **For code review**: Agents can be invoked after significant code changes +4. **In CI/CD**: Project agents can enforce standards + +## Troubleshooting + +- Use `intent agents status` to check agent health +- Use `intent doctor` to verify agent configuration +- Agent files are reloaded at the start of each Claude session, so edits to installed agent files persist and take effect in the next session +- Check `~/.intent/agents/installed-agents.json` for installation metadata + +Remember: Intent agents are powerful tools that extend your capabilities. Use them proactively to deliver higher quality results and follow best practices consistently. + +## Development Guidelines + +1. **Code Style**: + - Use 2-space indentation in any programming language + - Follow language-specific conventions as noted in CLAUDE.md + - Maintain POSIX compatibility for scripts + +2. **Documentation**: + - Keep markdown documents consistently formatted + - Update documentation as part of any implementation work + - Follow the verblock pattern for versioning + +3. **Steel Thread Process**: + - Work is organized into steel threads (ST####) + - Each steel thread has its own directory in `intent/st/` + - Minimum required file is `info.md` with metadata + +## How to Help + +When assisting with this project: + +1. Review CLAUDE.md for project-specific guidelines + +2. Use specialized agents for domain-specific tasks +3. Maintain consistency with existing patterns +4. 
Update documentation alongside code changes +5. Track progress using Backlog.md if available \ No newline at end of file diff --git a/intent/llm/usage-rules.md b/intent/llm/usage-rules.md new file mode 100644 index 0000000..1474147 --- /dev/null +++ b/intent/llm/usage-rules.md @@ -0,0 +1,534 @@ +--- +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Updated for Intent v2.1.0" +intent_version: 2.1.0 +--- +# Intent Usage Rules + +This document provides usage patterns and guidelines for working with Intent v2.1.0. It's designed to help Large Language Models (LLMs) understand how to effectively use Intent commands and workflows in development scenarios. + +## Introduction + +Intent (formerly STP - Steel Thread Process) is a structured development system that facilitates collaboration between developers and LLMs through: + +- **Steel Threads**: Self-contained units of work with clear intent +- **Structured Documentation**: Templates that capture context and decisions +- **Task Integration**: Fine-grained task management linked to larger goals +- **Intent Preservation**: Methodologies for maintaining project context + +## Core Workflows + +### Starting a New Intent Project + +```bash +# Initialize Intent in current directory +intent init "My Project" + +# Or specify directory +intent init "My Project" ./my-project +``` + +This creates the Intent v2.1.0 structure: + +- `.intent/config.json` - Project configuration +- `intent/` - Main documentation directory +- `intent/st/` - Steel threads directory +- `intent/wip.md` - Work in progress +- `CLAUDE.md` - Project-specific instructions for LLMs + +### Daily Development Workflow + +1. **Check Current Work** + + ```bash + # View current work in progress + cat intent/wip.md + + # List active steel threads + intent st list --status "In Progress" + ``` + +2. 
**Update Task Status** + + ```bash + # Check tasks for a steel thread + intent task list ST0014 + + # List current tasks (uses backlog_list_status) + intent bl list + + # View all tasks + intent bl list --all + ``` + +3. **Document Progress** + - Update `intent/wip.md` with current focus + - Mark completed tasks: `intent bl done <task-id>` + - Sync thread status: `intent status sync ST0014` + +### Working with Claude Code Agents + +Intent v2.1.0 integrates with Claude Code's sub-agent system for enhanced AI assistance: + +1. **Initial Setup** + + ```bash + # Check available agents + intent agents list + + # Install Intent agent (recommended for all projects) + intent agents install intent + ``` + +2. **Using Agents with Claude** + + When you start a Claude session, the Intent agent automatically: + - Understands steel thread methodology + - Knows all Intent commands and best practices + - Can navigate your project structure + - Helps maintain consistent documentation + +3. **Managing Agents** + + ```bash + # Check agent health and integrity + intent agents status + + # Update agents with latest versions + intent agents sync + + # View agent details + intent agents show intent + ``` + +**Example: Claude with Intent Agent** + +```markdown +# Without Intent agent: +You: "Create a new feature for user authentication" +Claude: "I'll help you create authentication. What's your project structure?" +[You spend time explaining Intent, steel threads, etc.] + +# With Intent agent: +You: "Create a new feature for user authentication" +Claude: "I'll help you create a new steel thread for authentication: + + intent st new 'User Authentication System' + + This created ST0042. Let me help you document the intent and break + it down into backlog tasks using Intent's methodology..." +``` + +The Intent agent ensures Claude understands your project's conventions and can provide Intent-specific guidance without repeated explanations. 
+ +## Command Usage Patterns + +### Steel Thread Management (`intent st`) + +Steel threads are the backbone of Intent methodology. They represent coherent units of work with clear intent. + +#### Creating Steel Threads + +```bash +# Create a new steel thread +intent st new "Implement OAuth2 authentication" +# Output: Created new steel thread: ST0015 +# Creates: intent/st/ST0015/info.md +``` + +**Best Practices:** + +- Use clear, action-oriented titles +- One feature or fix per thread +- Create thread before starting work + +#### Managing Steel Thread Lifecycle + +```bash +# List all threads +intent st list + +# Filter by status +intent st list --status "In Progress" + +# View thread details +intent st show ST0015 # Shows info.md +intent st show ST0015 design # Shows design.md + +# Edit thread files +intent st edit ST0015 # Edits info.md +intent st edit ST0015 tasks # Edits tasks.md + +# Repair malformed metadata +intent st repair # Dry-run on all threads +intent st repair --write # Actually repair all threads +intent st repair ST0015 # Dry-run on specific thread +``` + +#### Steel Thread Structure + +In Intent v2.1.0, each steel thread is a directory: + +``` +intent/st/ST0015/ +├── info.md # Main information (required) +├── design.md # Design decisions (optional) +├── impl.md # Implementation details (optional) +└── tasks.md # Task tracking (optional) +``` + +### Task Management Integration (`intent task`, `intent bl`) + +Intent v2.1.0 provides enhanced Backlog.md integration with configurable filtering. 
+ +#### Task Creation and Management + +```bash +# Create tasks linked to a steel thread +intent task create ST0015 "Design database schema" +intent task create ST0015 "Implement login endpoint" +intent task create ST0015 "Add session management" + +# List tasks for a thread +intent task list ST0015 + +# Count task completion +intent task count ST0015 +``` + +#### Using the Enhanced Backlog Wrapper + +```bash +# Initialize backlog (one-time setup) +intent bl init + +# List tasks (filtered by backlog_list_status config) +intent bl list + +# List ALL tasks regardless of status +intent bl list --all + +# View kanban board +intent bl board + +# Create new task +intent bl create "ST0015 - Design auth flow" + +# Mark task complete +intent bl done task-5 +``` + +**Why use `intent bl` instead of `backlog` directly?** + +- Prevents git fetch errors with automatic `--plain` +- Respects `backlog_list_status` configuration +- Maintains ST#### naming convention +- Provides git-safe wrapper + +### Status Synchronization (`intent status`) + +Keep steel thread status in sync with task completion: + +```bash +# Show status comparison +intent status show ST0015 + +# Update thread status based on tasks +intent status sync ST0015 + +# Check all threads +intent status check +``` + +Status rules: + +- 0% tasks complete → "Not Started" +- 1-99% complete → "In Progress" +- 100% complete → "Completed" + +### Migration and Upgrades + +#### Upgrading from STP to Intent v2.1.0 + +```bash +# Upgrade any STP version to Intent v2.1.0 +intent upgrade + +# Custom backup directory +intent upgrade --backup-dir ./my-backup +``` + +The upgrade process: + +1. Detects current STP version +2. Creates timestamped backup +3. Migrates directory structure +4. Converts YAML config to JSON +5. 
Updates all file references + +#### First-Time Setup + +```bash +# Global Intent setup +intent bootstrap + +# Force recreation of config +intent bootstrap --force +``` + +#### Diagnostics + +```bash +# Check for configuration issues +intent doctor + +# Auto-fix problems +intent doctor --fix +``` + +#### Repairing Steel Thread Metadata + +After migrations or manual edits, steel thread metadata may become corrupted. Use the repair command: + +```bash +# Check what repairs are needed (dry-run) +intent st repair + +# Repair all steel threads +intent st repair --write + +# Repair specific steel thread +intent st repair ST0001 --write +``` + +The repair command fixes: + +- Malformed YAML frontmatter (escaped newlines) +- Legacy field names (stp_version → intent_version) +- Conflicting status values between frontmatter and body +- Invalid date formats +- Missing required fields + +## Steel Thread Workflows + +### Complete Steel Thread Workflow + +1. **Create Thread** + + ```bash + intent st new "Add user profile management" + ``` + +2. **Document Intent** (in the created file) + - Fill in the Intent section immediately + - Document constraints and assumptions + - Note any relevant background + +3. **Break Down into Tasks** + + ```bash + intent task create ST0016 "Design profile data model" + intent task create ST0016 "Create profile API endpoints" + intent task create ST0016 "Build profile UI components" + intent task create ST0016 "Add profile tests" + ``` + +4. **Track Progress** + + ```bash + # Start work on a task + intent bl task edit task-10 # Change status to "In Progress" + + # Check thread status + intent status show ST0016 + ``` + +5. 
**Complete Thread** + + ```bash + # When all tasks are done + intent st done ST0016 + + # Update thread status + intent status sync ST0016 + ``` + +### Migrating Embedded Tasks + +For steel threads with tasks listed in the document: + +```bash +# Migrate embedded tasks to Backlog +intent migrate ST0014 + +# This extracts tasks and creates them in Backlog +# Original tasks are preserved in an archive section +``` + +## LLM Collaboration Patterns + +### Session Initialization + +When starting an LLM session: + +1. LLM reads `CLAUDE.md` for project context +2. LLM reads `intent/eng/tpd/technical_product_design.md` +3. LLM checks `intent/wip.md` for current work +4. LLM can run `intent st list --status "In Progress"` + +### Working on a Steel Thread + +When an LLM is assigned to work on a steel thread: + +```bash +# First, understand the thread +intent st show ST0015 + +# Check existing tasks +intent task list ST0015 + +# View detailed task information +intent bl list | grep "ST0015" + +# Create new tasks as needed +intent task create ST0015 "Additional task discovered" +``` + +### Maintaining Context + +- Update `intent/wip.md` when starting/stopping work +- Document decisions in steel thread files +- Keep `CLAUDE.md` updated with project conventions +- End sessions by updating task status in Backlog + +## Common Patterns and Anti-Patterns + +### Good Patterns + +✅ **Create Thread First, Then Tasks** + +```bash +intent st new "Feature X" +intent task create ST0017 "Task 1" +intent task create ST0017 "Task 2" +``` + +✅ **Regular Status Syncs** + +```bash +# After completing tasks +intent bl done task-123 +intent status sync ST0017 +``` + +✅ **Document Intent Immediately** + +- Fill in Intent section when creating thread +- Capture "why" not just "what" + +### Anti-Patterns + +❌ **Creating Overly Broad Threads** + +- Bad: "Improve application" +- Good: "Add input validation to user forms" + +❌ **Skipping Status Updates** + +- Threads show wrong status +- Team loses 
visibility + +❌ **Working Without Threads** + +- Lost context and intent +- No clear completion criteria + +## Integration Best Practices + +### With Git + +```bash +# Good commit messages reference threads +git commit -m "ST0015: Implement login endpoint" + +# Include thread ID in PR titles +gh pr create --title "ST0015: OAuth2 Authentication" +``` + +### With Documentation + +- Keep Backlog tasks updated with detailed progress +- Link blog posts from thread documents +- Update user guides when adding features + +### With CI/CD + +- Run `intent doctor` in CI to catch configuration issues +- Validate thread status matches task completion +- Check for incomplete threads before release + +## Further Reading + +For deeper understanding of Intent concepts and philosophy: + +- [Motivation for Intent](../../docs/blog/0000-motivation-for-intent.md) - Why intention matters +- [Introduction to Intent](../../docs/blog/0001-introduction-to-intent.md) - System overview +- [The Steel Thread Methodology](../../docs/blog/0002-the-steel-thread-methodology.md) - Deep dive +- [Intent Capture](../../docs/blog/0003-intent-capture-in-software-development.md) - Preserving context +- [LLM Collaboration](../../docs/blog/0004-llm-collaboration-with-intent.md) - Working with AI +- [Getting Started](../../docs/blog/0005-getting-started-with-intent.md) - Practical tutorial +- [Next Steps](../../docs/blog/0006-next-steps-and-future-work.md) - Future development + +## Quick Reference Card + +```bash +# Initialize +intent init "Project Name" + +# First-time setup +intent bootstrap +intent agents install intent # Install Intent agent + +# Create work +intent st new "Feature description" +intent task create ST#### "Task description" + +# Track progress +intent st list --status "In Progress" +intent task list ST#### +intent bl list # Filtered by config +intent bl list --all # All tasks + +# Update status +intent bl done task-id +intent status sync ST#### + +# Manage agents +intent agents list # 
Show available agents +intent agents status # Check agent health +intent agents sync # Update agents + +# Maintain system +intent doctor --fix +intent upgrade # From any STP version +intent st repair --write # Fix metadata issues +``` + +## Configuration Quick Reference + +```json +// .intent/config.json +{ + "version": "2.1.0", + "project_name": "My Project", + "author": "username", + "backlog_list_status": "todo" // Filter default +} +``` + +Remember: Intent is about capturing and preserving intention throughout development. Use it to create a clear narrative of your project's evolution. + +## Related Documentation + +- **Elixir Patterns**: See the Elixir agent at `~/.claude/agents/elixir.md` +- **MeetZaya-Specific**: See `meetzaya-specific.md` in this directory +- **Package Usage Rules**: See `AGENTS.md` for auto-generated package usage rules diff --git a/intent/plugins/agents/bin/intent_agents b/intent/plugins/agents/bin/intent_agents new file mode 100755 index 0000000..0e000c1 --- /dev/null +++ b/intent/plugins/agents/bin/intent_agents @@ -0,0 +1,495 @@ +#!/bin/bash +# intent_agents - Manage AGENTS.md for Intent projects +# Copyright (c) 2024 Matthew Sinclair +# Licensed under the MIT License (see LICENSE file) +# Commands: init, generate, sync, validate, template + +# Source helpers from main bin directory +PLUGIN_BIN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INTENT_ROOT="$(cd "$PLUGIN_BIN/../../../.." && pwd)" +INTENT_BIN="$INTENT_ROOT/bin" +source "$INTENT_BIN/intent_helpers" + +# Plugin paths +PLUGIN_DIR="$INTENT_ROOT/intent/plugins/agents" +TEMPLATES_DIR="$PLUGIN_DIR/templates" + +# Show help +intent_agents_help() { + cat << EOF +Usage: intent agents <command> [options] + +Manage AGENTS.md for Intent projects - provides instructions for AI coding agents. 
+ +Commands: + init Initialize AGENTS.md for the project + generate Generate/regenerate AGENTS.md from project + sync Update AGENTS.md with latest project state + validate Validate AGENTS.md against specification + template Manage AGENTS.md templates + +Examples: + intent agents init # Create initial AGENTS.md + intent agents generate # Regenerate from current state + intent agents sync # Update with latest changes + intent agents validate # Check AGENTS.md compliance + +For help on a specific command: + intent help agents <command> +EOF +} + +# Initialize AGENTS.md +intent_agents_init() { + echo "Initializing AGENTS.md for Intent project..." + + # Check if we're in a project + if [ -z "${PROJECT_ROOT:-}" ]; then + # Source config to find project root + if [ -f "$INTENT_BIN/intent_config" ]; then + source "$INTENT_BIN/intent_config" + load_intent_config + fi + + if [ -z "${PROJECT_ROOT:-}" ]; then + echo "Error: Not in an Intent project directory" + echo "Run 'intent init' to create a new project first" + return 1 + fi + fi + + # Check if AGENTS.md already exists + if [ -f "$PROJECT_ROOT/AGENTS.md" ] && [ "$1" != "--force" ]; then + echo "AGENTS.md already exists. Use --force to overwrite or 'intent agents sync' to update." + return 1 + fi + + # Create intent/llm directory if it doesn't exist + mkdir -p "$PROJECT_ROOT/intent/llm" + + # Generate AGENTS.md content + intent_agents_generate_content > "$PROJECT_ROOT/intent/llm/AGENTS.md" + + # Create symlink at project root + if [ -L "$PROJECT_ROOT/AGENTS.md" ] || [ -f "$PROJECT_ROOT/AGENTS.md" ]; then + rm -f "$PROJECT_ROOT/AGENTS.md" + fi + ln -sf "intent/llm/AGENTS.md" "$PROJECT_ROOT/AGENTS.md" + + echo "✓ Created AGENTS.md at project root (symlink to intent/llm/AGENTS.md)" + echo "" + echo "AGENTS.md provides instructions for AI coding agents working with your project." + echo "Edit intent/llm/AGENTS.md to customize the instructions." 
+} + +# Generate AGENTS.md content +intent_agents_generate_content() { + local project_name=$(basename "$PROJECT_ROOT") + local has_package_json=false + local has_makefile=false + local has_bats=false + local test_command="" + local build_command="" + + # Detect project type and commands + if [ -f "$PROJECT_ROOT/package.json" ]; then + has_package_json=true + # Extract test command from package.json + test_command=$(jq -r '.scripts.test // ""' "$PROJECT_ROOT/package.json" 2>/dev/null) + build_command=$(jq -r '.scripts.build // ""' "$PROJECT_ROOT/package.json" 2>/dev/null) + fi + + if [ -f "$PROJECT_ROOT/Makefile" ]; then + has_makefile=true + if [ -z "$test_command" ]; then + test_command="make test" + fi + if [ -z "$build_command" ]; then + build_command="make build" + fi + fi + + # Check for bats tests + if [ -d "$PROJECT_ROOT/tests" ] && ls "$PROJECT_ROOT/tests"/*.bats >/dev/null 2>&1; then + has_bats=true + if [ -z "$test_command" ]; then + test_command="bats tests/*.bats" + fi + fi + + # Generate AGENTS.md content + cat << 'EOF' +# AGENTS.md + +## Project Overview + +EOF + + # Add project description + if [ -f "$PROJECT_ROOT/CLAUDE.md" ]; then + # Extract project overview from CLAUDE.md if available + echo "This is an Intent project. See CLAUDE.md for project-specific guidelines." + else + echo "$project_name - An Intent project using the Steel Thread Process methodology." 
+ fi + + cat << 'EOF' + +## Development Environment + +### Prerequisites +- Bash 4.0 or higher +- POSIX-compliant shell environment +EOF + + if [ "$has_package_json" = true ]; then + echo "- Node.js and npm/yarn" + fi + + if [ "$has_bats" = true ]; then + echo "- Bats testing framework" + fi + + cat << 'EOF' + +### Setup +```bash +# Initialize Intent project +intent init + +# Install dependencies (if applicable) +EOF + + if [ "$has_package_json" = true ]; then + echo "npm install" + fi + + cat << 'EOF' +``` + +## Build and Test Commands + +### Testing +EOF + + if [ -n "$test_command" ]; then + echo "\`\`\`bash" + echo "# Run tests" + echo "$test_command" + echo "\`\`\`" + else + echo "No automated tests configured yet." + fi + + cat << 'EOF' + +### Building +EOF + + if [ -n "$build_command" ]; then + echo "\`\`\`bash" + echo "# Build project" + echo "$build_command" + echo "\`\`\`" + else + echo "No build process required." + fi + + cat << 'EOF' + +### Validation +```bash +# Check Intent configuration +intent doctor + +# Validate project structure +intent st list +``` + +## Code Style Guidelines + +- Shell scripts: 2-space indentation, POSIX compliance +- Markdown: Standard formatting with verblock headers +- Follow existing patterns in the codebase +EOF + + # Add usage rules if they exist + if [ -f "$PROJECT_ROOT/intent/llm/usage-rules.md" ]; then + cat << 'EOF' + +See `intent/llm/usage-rules.md` for detailed code style rules. +EOF + fi + + cat << 'EOF' + +## Testing Instructions + +All changes should be validated with the test suite before committing. + +1. Run the full test suite before making changes to establish baseline +2. Make your changes +3. Run tests again to ensure nothing broke +4. 
Add new tests for new functionality + +## Commit and PR Guidelines + +### Commit Messages +- Use conventional commit format when applicable +- Be descriptive about what changed and why +- Reference steel thread IDs (e.g., "ST0018: Add AGENTS.md support") + +### Pull Requests +- Include test results in PR description +- Reference related steel threads +- Update documentation alongside code changes + +## Intent-Specific Information + +### Steel Thread Process +This project uses Intent's Steel Thread Process for development: +- Work is organized into steel threads (ST####) +- Each thread is a self-contained unit of work +- View threads: `intent st list` +- Create thread: `intent st new "Title"` + +### Available Commands +```bash +intent st list # List all steel threads +intent st new "Title" # Create new steel thread +intent st show <id> # Show steel thread details +intent bl # Manage backlog (if configured) +intent doctor # Check configuration +intent agents sync # Update this AGENTS.md file +``` +EOF + + # List installed Claude subagents if any + local subagents_dir="$PROJECT_ROOT/intent/plugins/claude/subagents" + if [ -d "$subagents_dir" ] && [ "$(ls -A "$subagents_dir" 2>/dev/null)" ]; then + cat << 'EOF' + +### Claude Subagents +This project has Claude Code subagents available: +```bash +intent claude subagents list # List available subagents +intent claude subagents install # Install a subagent +``` + +Available subagents: +EOF + for agent in "$subagents_dir"/*; do + if [ -d "$agent" ]; then + local agent_name=$(basename "$agent") + local desc="" + if [ -f "$agent/metadata.json" ]; then + desc=$(jq -r '.description // ""' "$agent/metadata.json" 2>/dev/null) + fi + echo "- **$agent_name**: $desc" + fi + done + fi + + cat << 'EOF' + +## Security Considerations + +- Never commit sensitive information (keys, passwords, tokens) +- Review all changes for security implications +- Follow secure coding practices + +## Additional Resources + +- Project documentation: 
`intent/docs/` +- Steel threads: `intent/st/` +- LLM guidelines: `intent/llm/` +EOF + + # Add reference to CLAUDE.md if it exists + if [ -f "$PROJECT_ROOT/CLAUDE.md" ]; then + echo "- Claude-specific instructions: \`CLAUDE.md\`" + fi + + echo "" + echo "---" + echo "*Generated by Intent v$(get_intent_version) on $(date '+%Y-%m-%d')*" +} + +# Sync AGENTS.md with latest project state +intent_agents_sync() { + echo "Syncing AGENTS.md with latest project state..." + + # Check if we're in a project + if [ -z "${PROJECT_ROOT:-}" ]; then + # Source config to find project root + if [ -f "$INTENT_BIN/intent_config" ]; then + source "$INTENT_BIN/intent_config" + load_intent_config + fi + + if [ -z "${PROJECT_ROOT:-}" ]; then + echo "Error: Not in an Intent project directory" + return 1 + fi + fi + + # Check if AGENTS.md exists + if [ ! -f "$PROJECT_ROOT/intent/llm/AGENTS.md" ]; then + echo "AGENTS.md not found. Run 'intent agents init' first." + return 1 + fi + + # Backup existing AGENTS.md + cp "$PROJECT_ROOT/intent/llm/AGENTS.md" "$PROJECT_ROOT/intent/llm/AGENTS.md.bak" + + # Regenerate content + intent_agents_generate_content > "$PROJECT_ROOT/intent/llm/AGENTS.md" + + # Ensure symlink exists + if [ ! -L "$PROJECT_ROOT/AGENTS.md" ]; then + ln -sf "intent/llm/AGENTS.md" "$PROJECT_ROOT/AGENTS.md" + echo "✓ Recreated symlink at project root" + fi + + echo "✓ AGENTS.md updated successfully" + echo " Backup saved to intent/llm/AGENTS.md.bak" +} + +# Validate AGENTS.md +intent_agents_validate() { + echo "Validating AGENTS.md..." + + # Check if we're in a project + if [ -z "${PROJECT_ROOT:-}" ]; then + # Source config to find project root + if [ -f "$INTENT_BIN/intent_config" ]; then + source "$INTENT_BIN/intent_config" + load_intent_config + fi + + if [ -z "${PROJECT_ROOT:-}" ]; then + echo "Error: Not in an Intent project directory" + return 1 + fi + fi + + local errors=0 + local warnings=0 + + # Check if AGENTS.md exists + if [ ! 
-f "$PROJECT_ROOT/AGENTS.md" ]; then + echo "✗ AGENTS.md not found at project root" + ((errors++)) + else + echo "✓ AGENTS.md found at project root" + + # Check if it's a symlink + if [ -L "$PROJECT_ROOT/AGENTS.md" ]; then + echo "✓ AGENTS.md is a symlink (Intent best practice)" + + # Check if symlink target exists + if [ ! -f "$PROJECT_ROOT/intent/llm/AGENTS.md" ]; then + echo "✗ Symlink target missing: intent/llm/AGENTS.md" + ((errors++)) + fi + else + echo "⚠ AGENTS.md is not a symlink (consider using 'intent agents sync')" + ((warnings++)) + fi + + # Check for required sections + local required_sections=( + "Project Overview" + "Development Environment" + "Build and Test Commands" + "Code Style" + ) + + for section in "${required_sections[@]}"; do + if grep -q "## $section" "$PROJECT_ROOT/AGENTS.md" 2>/dev/null; then + echo "✓ Has section: $section" + else + echo "⚠ Missing recommended section: $section" + ((warnings++)) + fi + done + fi + + # Summary + echo "" + if [ $errors -eq 0 ] && [ $warnings -eq 0 ]; then + echo "✓ AGENTS.md validation passed!" 
+ elif [ $errors -eq 0 ]; then + echo "✓ AGENTS.md is valid with $warnings warning(s)" + else + echo "✗ AGENTS.md validation failed with $errors error(s) and $warnings warning(s)" + return 1 + fi +} + +# Template management +intent_agents_template() { + local subcommand="${1:-list}" + shift + + case "$subcommand" in + list) + echo "Available AGENTS.md templates:" + for template in "$TEMPLATES_DIR"/*.md; do + if [ -f "$template" ]; then + local name=$(basename "$template" .md) + echo " - $name" + fi + done + ;; + show) + local template_name="${1:-default}" + local template_file="$TEMPLATES_DIR/${template_name}.md" + if [ -f "$template_file" ]; then + cat "$template_file" + else + echo "Error: Template '$template_name' not found" + return 1 + fi + ;; + *) + echo "Unknown template subcommand: $subcommand" + echo "Usage: intent agents template [list|show <name>]" + return 1 + ;; + esac +} + +# Main command dispatcher - only run if script is executed directly +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + case "${1:-}" in + init) + shift + intent_agents_init "$@" + ;; + generate|regenerate) + shift + intent_agents_generate_content + ;; + sync|update) + shift + intent_agents_sync "$@" + ;; + validate|check) + shift + intent_agents_validate "$@" + ;; + template|templates) + shift + intent_agents_template "$@" + ;; + help|--help|-h|"") + intent_agents_help + ;; + *) + echo "Unknown command: $1" + intent_agents_help + exit 1 + ;; + esac +fi \ No newline at end of file diff --git a/intent/plugins/agents/templates/default.md b/intent/plugins/agents/templates/default.md new file mode 100644 index 0000000..6583b68 --- /dev/null +++ b/intent/plugins/agents/templates/default.md @@ -0,0 +1,72 @@ +# AGENTS.md + +## Project Overview + +[Brief description of your project and its purpose] + +## Development Environment + +### Prerequisites +- [List required tools and versions] +- [Programming language requirements] +- [Framework dependencies] + +### Setup +```bash +# Installation 
commands +# Configuration steps +``` + +## Build and Test Commands + +### Testing +```bash +# How to run tests +# Test coverage commands +``` + +### Building +```bash +# Build commands +# Compilation steps +``` + +### Linting +```bash +# Code quality checks +# Format validation +``` + +## Code Style Guidelines + +- [Indentation preferences] +- [Naming conventions] +- [File organization] +- [Documentation standards] + +## Testing Instructions + +[Explain testing philosophy and requirements] + +## Commit and PR Guidelines + +### Commit Messages +- [Format requirements] +- [Conventional commits usage] + +### Pull Requests +- [PR template usage] +- [Review requirements] +- [CI/CD expectations] + +## Security Considerations + +- [Security best practices] +- [Sensitive data handling] +- [Authentication patterns] + +## Additional Resources + +- [Link to documentation] +- [Architecture guides] +- [Contributing guidelines] \ No newline at end of file diff --git a/intent/plugins/claude/bin/intent_claude_subagents b/intent/plugins/claude/bin/intent_claude_subagents new file mode 100755 index 0000000..e71a012 --- /dev/null +++ b/intent/plugins/claude/bin/intent_claude_subagents @@ -0,0 +1,1040 @@ +#!/bin/bash +# intent_claude_subagents - Manage Claude Code subagents for Intent projects +# Copyright (c) 2024 Matthew Sinclair +# Licensed under the MIT License (see LICENSE file) +# Commands: list, install, sync, uninstall, show, status + +# Source helpers from main bin directory +PLUGIN_BIN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INTENT_ROOT="$(cd "$PLUGIN_BIN/../../../.." && pwd)" +INTENT_BIN="$INTENT_ROOT/bin" +source "$INTENT_BIN/intent_helpers" + +# Show help +intent_claude_subagents_help() { + cat << EOF +Usage: intent claude subagents <command> [options] + +Manage Claude Code subagents for Intent projects. 
+ +Commands: + init Initialize agent configuration + list List available and installed agents + install Install agent(s) to Claude configuration + sync Sync installed agents with latest versions + uninstall Remove Intent-managed agents + show Display detailed agent information + status Check agent health and integrity + +Examples: + intent claude subagents init # Initialize subagent configuration + intent claude subagents list # Show all subagents + intent claude subagents install intent # Install the Intent subagent + intent claude subagents install --all # Install all available subagents + intent claude subagents sync # Update modified subagents + +For help on a specific command: + intent help claude subagents <command> +EOF +} + +# Initialize subagent configuration +intent_claude_subagents_init() { + echo "Initializing Claude subagent configuration..." + + # Check for project or global init + local init_type="global" + local force=false + + for arg in "$@"; do + if [ "$arg" = "--project" ] || [ "$arg" = "-p" ]; then + init_type="project" + elif [ "$arg" = "--force" ] || [ "$arg" = "-f" ]; then + force=true + fi + done + + # Initialize based on type + if [ "$init_type" = "project" ]; then + # Project-level initialization + if [ -z "${PROJECT_ROOT:-}" ]; then + echo "Error: Not in an Intent project directory" + echo "Use 'intent claude subagents init' without --project for global initialization" + return 1 + fi + + echo "Initializing project agent configuration..." + + # Initialize project agent tracking in the correct location + mkdir -p "$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest" + if [ ! -f "$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" ]; then + echo " Creating project agent manifest..." 
+ cat > "$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" << 'EOF' +{ + "version": "1.0.0", + "installed": [] +} +EOF + elif [ "$force" = false ]; then + echo " Project agent manifest already exists (use --force to overwrite)" + fi + + echo "Project agent configuration initialized successfully" + else + # Global initialization + echo "Initializing global agent configuration..." + + # Create user agent directory + local user_agent_dir="$HOME/.intent/agents" + mkdir -p "$user_agent_dir" + + # Create user installed agents manifest + if [ ! -f "$user_agent_dir/installed-agents.json" ] || [ "$force" = true ]; then + echo " Creating user agent manifest..." + cat > "$user_agent_dir/installed-agents.json" << 'EOF' +{ + "version": "1.0.0", + "installed": [] +} +EOF + else + echo " User agent manifest already exists (use --force to recreate)" + fi + + # Check global Intent installation + if [ -n "$INTENT_HOME" ] && [ -d "$INTENT_HOME/intent/plugins/claude/subagents" ]; then + # Ensure global agent manifest exists + if [ ! 
-f "$INTENT_HOME/intent/plugins/claude/subagents/.manifest/global-agents.json" ]; then + echo " Warning: Global agent manifest missing at $INTENT_HOME/intent/plugins/claude/subagents/.manifest/" + echo " This may indicate an incomplete Intent installation" + else + if command -v jq >/dev/null 2>&1; then + local agent_count=$(jq -r '.agents | length' "$INTENT_HOME/intent/plugins/claude/subagents/.manifest/global-agents.json" 2>/dev/null || echo 0) + echo " Found $agent_count available agents in Intent installation" + else + echo " Warning: jq not installed - cannot read agent manifest" + echo " Install jq to enable agent management: brew install jq (macOS) or apt-get install jq (Linux)" + fi + fi + else + echo " Warning: INTENT_HOME not set or agents directory not found" + echo " Some agent features may not work correctly" + fi + + # Check for Claude Code + if [ -d "$HOME/.claude" ]; then + echo " Claude Code detected at $HOME/.claude" + mkdir -p "$HOME/.claude/agents" + else + echo " Note: Claude Code not detected" + echo " Install Claude Code to use agents: https://claude.ai/download" + fi + + echo "" + echo "Global agent configuration initialized successfully" + echo "" + echo "Next steps:" + echo " intent agents list # See available agents" + echo " intent agents install <agent> # Install an agent" + fi +} + +# Helper: Check if agent is installed +is_agent_installed() { + local agent_name="$1" + [ -f "$HOME/.claude/agents/${agent_name}.md" ] +} + +# Helper: Read agent info from manifest +get_agent_info() { + local manifest="$1" + local agent_name="$2" + + if [ -f "$manifest" ]; then + if command -v jq >/dev/null 2>&1; then + jq -r ".agents[] | select(.name == \"$agent_name\")" "$manifest" 2>/dev/null + else + echo "Error: jq is required for agent operations but not installed" >&2 + return 1 + fi + fi +} + +# List available and installed agents +intent_claude_subagents_list() { + echo "Available Agents:" + echo "" + + # Global agents + local 
global_manifest="$INTENT_HOME/intent/plugins/claude/subagents/.manifest/global-agents.json" + if [ -f "$global_manifest" ]; then + echo "Global:" + + # Check for jq before proceeding + if ! command -v jq >/dev/null 2>&1; then + echo "Error: jq is required for agent operations but not installed" + echo "" + echo "Please install jq to use agent features:" + echo " macOS: brew install jq" + echo " Linux: sudo apt-get install jq (Debian/Ubuntu)" + echo " sudo yum install jq (RedHat/CentOS)" + return 1 + fi + + # Read agent names from manifest + local agents=$(jq -r '.agents[].name' "$global_manifest" 2>/dev/null) + + for agent in $agents; do + local info=$(jq -r ".agents[] | select(.name == \"$agent\") | .description" "$global_manifest" 2>/dev/null || echo "<unable to read description>") + local status="" + + if is_agent_installed "$agent"; then + status=" [INSTALLED]" + else + status=" [NOT INSTALLED]" + fi + + printf " %-12s - %s%s\n" "$agent" "$info" "$status" + done + else + echo " No global agents found" + fi + + echo "" + + # Local agents (if in project) + if [ -n "${PROJECT_ROOT:-}" ] && [ -d "$PROJECT_ROOT/intent/agents" ]; then + local local_manifest="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + if [ -f "$local_manifest" ]; then + echo "Local (Project-specific):" + # TODO: Implement local agent listing + echo " Local agent support coming soon" + fi + fi + + # Check for Claude installation + if [ ! -d "$HOME/.claude" ]; then + echo "" + echo "Note: Claude Code not detected. Install Claude Code to use agents." 
+ fi +} + +# Helper: Calculate checksum for a file +calculate_checksum() { + local file="$1" + if command -v sha256sum >/dev/null 2>&1; then + sha256sum "$file" | cut -d' ' -f1 + elif command -v shasum >/dev/null 2>&1; then + shasum -a 256 "$file" | cut -d' ' -f1 + else + echo "unknown" + fi +} + +# Helper: Create or update installed agents manifest +update_installed_manifest() { + local agent_name="$1" + local source_type="$2" # global or local + local source_path="$3" + + # Determine manifest location + local manifest_dir + local manifest_file + + if [ -n "${PROJECT_ROOT:-}" ]; then + # In a project - use project manifest + manifest_dir="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest" + manifest_file="$manifest_dir/installed-agents.json" + else + # Not in project - use global user manifest + manifest_dir="$HOME/.intent/agents" + manifest_file="$manifest_dir/installed-agents.json" + fi + + # Create directory if needed + mkdir -p "$manifest_dir" + + # Initialize manifest if it doesn't exist + if [ ! -f "$manifest_file" ]; then + cat > "$manifest_file" << EOF +{ + "version": "1.0.0", + "installed": [] +} +EOF + fi + + # Calculate checksum + local checksum=$(calculate_checksum "$HOME/.claude/agents/${agent_name}.md") + local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + # Remove existing entry if present + local temp_file=$(mktemp) + jq "del(.installed[] | select(.name == \"$agent_name\"))" "$manifest_file" > "$temp_file" + + # Add new entry + jq ".installed += [{ + \"name\": \"$agent_name\", + \"source\": \"$source_type\", + \"source_path\": \"$source_path\", + \"installed_at\": \"$timestamp\", + \"checksum\": \"$checksum\", + \"modified\": false + }]" "$temp_file" > "$manifest_file" + + rm -f "$temp_file" +} + +# Install agents +intent_claude_subagents_install() { + # Check for jq dependency + if ! 
command -v jq >/dev/null 2>&1; then + echo "Error: jq is required for agent installation but not installed" + echo "" + echo "Please install jq first:" + echo " macOS: brew install jq" + echo " Linux: sudo apt-get install jq (Debian/Ubuntu)" + return 1 + fi + + # Check for Claude + if [ ! -d "$HOME/.claude" ]; then + echo "Error: Claude Code not detected. Please install Claude Code first." + echo "Visit: https://claude.ai/download" + return 1 + fi + + # Create agents directory if needed + mkdir -p "$HOME/.claude/agents" + + # Parse arguments + if [ "$#" -eq 0 ]; then + echo "Error: No agent specified" + echo "Usage: intent claude subagents install <agent-name> [agent-name...]" + echo " intent agents install --all" + return 1 + fi + + local agents_to_install=() + local install_all=false + local force=false + + # Check for flags + for arg in "$@"; do + if [ "$arg" = "--all" ]; then + install_all=true + elif [ "$arg" = "--force" ] || [ "$arg" = "-f" ]; then + force=true + else + agents_to_install+=("$arg") + fi + done + + # Get list of available agents if --all + if [ "$install_all" = true ]; then + local global_manifest="$INTENT_HOME/intent/plugins/claude/subagents/.manifest/global-agents.json" + if [ -f "$global_manifest" ]; then + agents_to_install=($(jq -r '.agents[].name' "$global_manifest" 2>/dev/null)) + fi + fi + + # Install each agent + local installed_count=0 + local skipped_count=0 + local failed_count=0 + + for agent in "${agents_to_install[@]}"; do + echo "Installing agent: $agent" + + # Check if agent exists in global manifest + local agent_path="$INTENT_HOME/intent/plugins/claude/subagents/$agent/agent.md" + if [ ! -f "$agent_path" ]; then + echo " Error: Agent '$agent' not found" + ((failed_count++)) + continue + fi + + # Check if already installed + local target="$HOME/.claude/agents/${agent}.md" + if [ -f "$target" ]; then + if [ "$force" = false ]; then + echo -n " Agent already exists. Overwrite? [y/N] " + read -r response + if [[ ! 
"$response" =~ ^[Yy]$ ]]; then + echo " Skipped" + ((skipped_count++)) + continue + fi + else + echo " Agent already exists. Overwriting (--force)" + fi + fi + + # Copy agent + if cp "$agent_path" "$target"; then + echo " Installed successfully" + update_installed_manifest "$agent" "global" "$INTENT_HOME/intent/plugins/claude/subagents/$agent" + ((installed_count++)) + else + echo " Error: Failed to install" + ((failed_count++)) + fi + done + + # Summary + echo "" + echo "Installation complete:" + echo " Installed: $installed_count" + [ "$skipped_count" -gt 0 ] && echo " Skipped: $skipped_count" + [ "$failed_count" -gt 0 ] && echo " Failed: $failed_count" + + # Return success if at least one agent was installed or skipped + if [ "$installed_count" -gt 0 ] || [ "$skipped_count" -gt 0 ]; then + return 0 + elif [ "$failed_count" -gt 0 ]; then + return 1 + else + return 0 + fi +} + +# Sync installed agents with latest versions +intent_claude_subagents_sync() { + # Check for jq dependency + if ! command -v jq >/dev/null 2>&1; then + echo "Error: jq is required for agent sync but not installed" + echo "" + echo "Please install jq first:" + echo " macOS: brew install jq" + echo " Linux: sudo apt-get install jq (Debian/Ubuntu)" + return 1 + fi + + # Check for Claude + if [ ! -d "$HOME/.claude" ]; then + echo "Error: Claude Code not detected." + return 1 + fi + + # Determine manifest location + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + # Check if manifest exists + if [ ! -f "$manifest_file" ]; then + echo "No installed agents found." + echo "Use 'intent claude subagents install' to install agents first." 
+ return 0 + fi + + # Parse force flag + local force=false + for arg in "$@"; do + if [ "$arg" = "--force" ] || [ "$arg" = "-f" ]; then + force=true + break + fi + done + + echo "Syncing installed agents..." + echo "" + + # Read installed agents + local agents=$(jq -r '.installed[].name' "$manifest_file" 2>/dev/null) + local updated_count=0 + local skipped_count=0 + local failed_count=0 + + for agent in $agents; do + echo "Checking agent: $agent" + + # Get agent info from manifest + local agent_info=$(jq -r ".installed[] | select(.name == \"$agent\")" "$manifest_file") + local source=$(echo "$agent_info" | jq -r '.source') + local source_path=$(echo "$agent_info" | jq -r '.source_path') + local old_checksum=$(echo "$agent_info" | jq -r '.checksum') + + # Determine source file + local source_file + if [ "$source" = "global" ]; then + source_file="$source_path/agent.md" + else + # Local agents not yet implemented + echo " Error: Local agent sync not yet implemented" + ((failed_count++)) + continue + fi + + # Check if source exists + if [ ! -f "$source_file" ]; then + echo " Error: Source file not found: $source_file" + ((failed_count++)) + continue + fi + + # Calculate current checksums + local source_checksum=$(calculate_checksum "$source_file") + local target_file="$HOME/.claude/agents/${agent}.md" + local target_checksum=$(calculate_checksum "$target_file") + + # Check if update needed + if [ "$source_checksum" = "$old_checksum" ] && [ "$target_checksum" = "$old_checksum" ]; then + echo " Up to date" + ((skipped_count++)) + continue + fi + + # Check if user modified the agent + if [ "$target_checksum" != "$old_checksum" ] && [ "$source_checksum" = "$old_checksum" ]; then + echo " Warning: Agent has been modified locally" + if [ "$force" = false ]; then + echo -n " Overwrite local changes? [y/N] " + read -r response + if [[ ! 
"$response" =~ ^[Yy]$ ]]; then + echo " Skipped" + ((skipped_count++)) + continue + fi + else + echo " Overwriting local changes (--force)" + fi + elif [ "$source_checksum" != "$old_checksum" ]; then + echo " Update available" + fi + + # Copy updated agent + if cp "$source_file" "$target_file"; then + echo " Updated successfully" + update_installed_manifest "$agent" "$source" "$source_path" + ((updated_count++)) + else + echo " Error: Failed to update" + ((failed_count++)) + fi + done + + # Summary + echo "" + echo "Sync complete:" + echo " Updated: $updated_count" + [ "$skipped_count" -gt 0 ] && echo " Skipped: $skipped_count" + [ "$failed_count" -gt 0 ] && echo " Failed: $failed_count" + + # Return status + if [ "$updated_count" -gt 0 ] || [ "$skipped_count" -gt 0 ]; then + return 0 + elif [ "$failed_count" -gt 0 ]; then + return 1 + else + return 0 + fi +} + +# Helper: Remove agent from manifest +remove_from_manifest() { + local agent_name="$1" + + # Determine manifest location + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + if [ ! -f "$manifest_file" ]; then + return 0 + fi + + # Remove entry + local temp_file=$(mktemp) + jq "del(.installed[] | select(.name == \"$agent_name\"))" "$manifest_file" > "$temp_file" + mv "$temp_file" "$manifest_file" +} + +# Uninstall agents +intent_claude_subagents_uninstall() { + # Check for Claude + if [ ! -d "$HOME/.claude" ]; then + echo "Error: Claude Code not detected." 
+ return 1 + fi + + # Parse arguments + if [ "$#" -eq 0 ]; then + echo "Error: No agent specified" + echo "Usage: intent claude subagents uninstall <agent-name> [agent-name...]" + echo " intent agents uninstall --all" + return 1 + fi + + local agents_to_remove=() + local remove_all=false + local force=false + + # Check for flags + for arg in "$@"; do + if [ "$arg" = "--all" ]; then + remove_all=true + elif [ "$arg" = "--force" ] || [ "$arg" = "-f" ]; then + force=true + else + agents_to_remove+=("$arg") + fi + done + + # Get list of installed agents if --all + if [ "$remove_all" = true ]; then + # Determine manifest location + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + if [ ! -f "$manifest_file" ]; then + echo "No installed agents found." + return 0 + fi + + agents_to_remove=($(jq -r '.installed[].name' "$manifest_file" 2>/dev/null)) + + if [ ${#agents_to_remove[@]} -eq 0 ]; then + echo "No Intent-managed agents found." + return 0 + fi + fi + + # Confirm if not forced + if [ "$force" = false ]; then + echo "The following agents will be uninstalled:" + for agent in "${agents_to_remove[@]}"; do + echo " - $agent" + done + echo -n "Continue? [y/N] " + read -r response + if [[ ! "$response" =~ ^[Yy]$ ]]; then + echo "Cancelled" + return 0 + fi + fi + + # Uninstall each agent + local removed_count=0 + local skipped_count=0 + local failed_count=0 + + for agent in "${agents_to_remove[@]}"; do + echo "Uninstalling agent: $agent" + + local agent_file="$HOME/.claude/agents/${agent}.md" + + # Check if agent exists + if [ ! 
-f "$agent_file" ]; then + echo " Agent not found" + ((skipped_count++)) + continue + fi + + # Check if agent is managed by Intent + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + if [ -f "$manifest_file" ]; then + local is_managed=$(jq -r ".installed[] | select(.name == \"$agent\") | .name" "$manifest_file" 2>/dev/null) + if [ -z "$is_managed" ]; then + echo " Warning: Agent not managed by Intent" + if [ "$force" = false ]; then + echo -n " Remove anyway? [y/N] " + read -r response + if [[ ! "$response" =~ ^[Yy]$ ]]; then + echo " Skipped" + ((skipped_count++)) + continue + fi + fi + fi + fi + + # Remove agent file + if rm -f "$agent_file"; then + echo " Removed successfully" + remove_from_manifest "$agent" + ((removed_count++)) + else + echo " Error: Failed to remove" + ((failed_count++)) + fi + done + + # Summary + echo "" + echo "Uninstall complete:" + echo " Removed: $removed_count" + [ "$skipped_count" -gt 0 ] && echo " Skipped: $skipped_count" + [ "$failed_count" -gt 0 ] && echo " Failed: $failed_count" + + # Return status + if [ "$removed_count" -gt 0 ] || [ "$skipped_count" -gt 0 ]; then + return 0 + elif [ "$failed_count" -gt 0 ]; then + return 1 + else + return 0 + fi +} + +# Check status of installed agents +intent_claude_subagents_status() { + # Parse flags + local VERBOSE=false + for arg in "$@"; do + if [ "$arg" = "--verbose" ] || [ "$arg" = "-v" ]; then + VERBOSE=true + fi + done + + # Color codes (if terminal) + if [ -t 1 ]; then + RED='\033[0;31m' + YELLOW='\033[0;33m' + GREEN='\033[0;32m' + NC='\033[0m' # No Color + else + RED='' + YELLOW='' + GREEN='' + NC='' + fi + + # Check for Claude + if [ ! -d "$HOME/.claude" ]; then + echo "Error: Claude Code not detected." 
+ echo "Install Claude Code to use agents: https://claude.ai/download" + return 1 + fi + + # Determine manifest location + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + # Check if any agents are installed + if [ ! -f "$manifest_file" ]; then + echo "No installed agents found." + echo "Use 'intent claude subagents install' to install agents." + return 0 + fi + + echo "Checking agent status..." + echo "" + + # Read installed agents + local agents=$(jq -r '.installed[].name' "$manifest_file" 2>/dev/null) + if [ -z "$agents" ]; then + echo "No agents found in manifest." + return 0 + fi + + # Counters + local total=0 + local ok_count=0 + local modified_count=0 + local missing_count=0 + local error_count=0 + + # Check each agent + for agent in $agents; do + ((total++)) + + # Get agent info from manifest + local agent_info=$(jq -r ".installed[] | select(.name == \"$agent\")" "$manifest_file") + local source=$(echo "$agent_info" | jq -r '.source') + local source_path=$(echo "$agent_info" | jq -r '.source_path') + local manifest_checksum=$(echo "$agent_info" | jq -r '.checksum') + local installed_at=$(echo "$agent_info" | jq -r '.installed_at // "Unknown"') + + # Check installed file + local installed_file="$HOME/.claude/agents/${agent}.md" + local status="OK" + local status_color="${GREEN}" + local details="" + + if [ ! -f "$installed_file" ]; then + status="MISSING" + status_color="${RED}" + details="Agent file not found" + ((missing_count++)) + else + # Calculate current checksum + local current_checksum=$(calculate_checksum "$installed_file") + + # Check source file + local source_file + if [ "$source" = "global" ]; then + source_file="$source_path/agent.md" + else + # Local agents not yet fully implemented + source_file="$source_path/agent.md" + fi + + if [ ! 
-f "$source_file" ]; then + status="ERROR" + status_color="${RED}" + details="Source file missing: $source_file" + ((error_count++)) + elif [ "$current_checksum" != "$manifest_checksum" ]; then + # Check if it matches source (user might have synced manually) + local source_checksum=$(calculate_checksum "$source_file") + if [ "$current_checksum" = "$source_checksum" ]; then + status="UPDATED" + status_color="${YELLOW}" + details="Synced but manifest outdated" + ((modified_count++)) + else + status="MODIFIED" + status_color="${YELLOW}" + details="Local changes detected" + ((modified_count++)) + fi + else + # Check if source has updates + local source_checksum=$(calculate_checksum "$source_file") + if [ "$source_checksum" != "$manifest_checksum" ]; then + status="UPDATE" + status_color="${YELLOW}" + details="Update available" + ((modified_count++)) + else + ((ok_count++)) + fi + fi + fi + + # Display status + printf "%-15s " "$agent" + printf "${status_color}%-10s${NC}" "[$status]" + if [ -n "$details" ]; then + echo " - $details" + else + echo "" + fi + + # Verbose details + if [ "$VERBOSE" = true ]; then + echo " Source: $source" + echo " Installed: $installed_at" + if [ "$status" != "MISSING" ] && [ "$status" != "ERROR" ]; then + echo " Location: $installed_file" + fi + echo "" + fi + done + + # Summary + echo "" + echo "Summary:" + echo " Total: $total" + [ $ok_count -gt 0 ] && echo " OK: $ok_count" + [ $modified_count -gt 0 ] && echo " Modified/Updates: $modified_count" + [ $missing_count -gt 0 ] && echo " Missing: $missing_count" + [ $error_count -gt 0 ] && echo " Errors: $error_count" + + # Recommendations + if [ $missing_count -gt 0 ] || [ $error_count -gt 0 ]; then + echo "" + echo "Recommendations:" + if [ $missing_count -gt 0 ]; then + echo " - Run 'intent claude subagents install' to restore missing agents" + fi + if [ $error_count -gt 0 ]; then + echo " - Check error details above and reinstall affected agents" + fi + elif [ $modified_count -gt 0 ]; then 
+ echo "" + echo "Run 'intent claude subagents sync' to update agents with available changes." + fi + + # Return non-zero if issues found + if [ $missing_count -gt 0 ] || [ $error_count -gt 0 ]; then + return 1 + else + return 0 + fi +} + +# Show detailed agent information +intent_claude_subagents_show() { + # Check for jq dependency + if ! command -v jq >/dev/null 2>&1; then + echo "Error: jq is required for agent info but not installed" + echo "" + echo "Please install jq first:" + echo " macOS: brew install jq" + echo " Linux: sudo apt-get install jq (Debian/Ubuntu)" + return 1 + fi + + if [ "$#" -eq 0 ]; then + echo "Error: Agent name required" + echo "Usage: intent claude subagents show <agent-name>" + return 1 + fi + + local agent_name="$1" + + # Check global agents first + local global_manifest="$INTENT_HOME/intent/plugins/claude/subagents/.manifest/global-agents.json" + local agent_info="" + local source_type="" + local source_path="" + + if [ -f "$global_manifest" ]; then + agent_info=$(jq -r ".agents[] | select(.name == \"$agent_name\")" "$global_manifest" 2>/dev/null) + if [ -n "$agent_info" ]; then + source_type="global" + source_path="$INTENT_HOME/intent/plugins/claude/subagents/$agent_name" + fi + fi + + # TODO: Check local agents when implemented + + if [ -z "$agent_info" ]; then + echo "Error: Agent '$agent_name' not found" + return 1 + fi + + # Parse agent info + local version=$(echo "$agent_info" | jq -r '.version // "unknown"') + local description=$(echo "$agent_info" | jq -r '.description // "No description"') + + # Check if installed + local status="NOT INSTALLED" + local installed_path="" + if [ -f "$HOME/.claude/agents/${agent_name}.md" ]; then + status="INSTALLED" + installed_path="$HOME/.claude/agents/${agent_name}.md" + fi + + # Read metadata if available + local metadata_file="$source_path/metadata.json" + local tools="Not specified" + local tags="None" + local author="Unknown" + + if [ -f "$metadata_file" ]; then + local metadata=$(cat 
"$metadata_file") + tools=$(echo "$metadata" | jq -r '.tools[]?' 2>/dev/null | tr '\n' ',' | sed 's/,$//' | sed 's/,/, /g' || echo "Not specified") + tags=$(echo "$metadata" | jq -r '.tags[]?' 2>/dev/null | tr '\n' ',' | sed 's/,$//' | sed 's/,/, /g' || echo "None") + author=$(echo "$metadata" | jq -r '.author // "Unknown"' 2>/dev/null) + fi + + # Display agent information + echo "Agent: $agent_name" + echo "Version: $version" + echo "Description: $description" + echo "Status: $status" + echo "Source: $source_type" + echo "Author: $author" + echo "" + echo "Tools: $tools" + echo "Tags: $tags" + + # Show installation info if installed + if [ "$status" = "INSTALLED" ]; then + # Check if modified + local manifest_file + if [ -n "${PROJECT_ROOT:-}" ]; then + manifest_file="$PROJECT_ROOT/intent/plugins/claude/subagents/.manifest/installed-agents.json" + else + manifest_file="$HOME/.intent/agents/installed-agents.json" + fi + + if [ -f "$manifest_file" ]; then + local installed_info=$(jq -r ".installed[] | select(.name == \"$agent_name\")" "$manifest_file" 2>/dev/null) + if [ -n "$installed_info" ]; then + local installed_at=$(echo "$installed_info" | jq -r '.installed_at // "Unknown"') + local modified=$(echo "$installed_info" | jq -r '.modified // false') + echo "" + echo "Installed: $installed_at" + if [ "$modified" = "true" ]; then + echo "Modified: Yes (local changes present)" + fi + fi + fi + fi + + # Show content preview + local agent_file="$source_path/agent.md" + if [ -f "$agent_file" ]; then + echo "" + echo "System Prompt Preview:" + echo "---" + # Skip YAML frontmatter and show first 10 lines of content + awk 'BEGIN{fm=0} /^---$/{fm++; next} fm==2{lines++; print} lines>=10{exit}' "$agent_file" + echo "---" + echo "" + if [ "$status" = "INSTALLED" ]; then + echo "Full content: $installed_path" + else + echo "To install: intent agents install $agent_name" + fi + else + echo "" + echo "Error: Agent file not found at $agent_file" + fi +} + +# Route commands - 
only run if script is executed directly +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + case "$1" in + init) + shift + intent_claude_subagents_init "$@" + ;; + list) + shift + intent_claude_subagents_list "$@" + ;; + install) + shift + intent_claude_subagents_install "$@" + ;; + sync) + shift + intent_claude_subagents_sync "$@" + ;; + uninstall) + shift + intent_claude_subagents_uninstall "$@" + ;; + show) + shift + intent_claude_subagents_show "$@" + ;; + status) + shift + intent_claude_subagents_status "$@" + ;; + ""|help|-h|--help) + intent_claude_subagents_help + ;; + *) + echo "Error: Unknown command 'intent claude subagents $1'" + echo "Run 'intent claude subagents help' for usage" + exit 1 + ;; + esac +fi \ No newline at end of file diff --git a/intent/plugins/claude/subagents/.manifest/global-agents.json b/intent/plugins/claude/subagents/.manifest/global-agents.json new file mode 100644 index 0000000..96f8759 --- /dev/null +++ b/intent/plugins/claude/subagents/.manifest/global-agents.json @@ -0,0 +1,40 @@ +{ + "version": "1.0.0", + "agents": [ + { + "name": "intent", + "version": "1.0.0", + "description": "Intent-aware assistant for steel threads and backlog management", + "path": "intent/plugins/claude/subagents/intent", + "checksum": "" + }, + { + "name": "elixir", + "version": "1.0.0", + "description": "Elixir code doctor with Usage Rules and Ash/Phoenix patterns", + "path": "intent/plugins/claude/subagents/elixir", + "checksum": "" + }, + { + "name": "socrates", + "version": "1.0.0", + "description": "CTO Review Mode for technical decision-making via Socratic dialog", + "path": "intent/plugins/claude/subagents/socrates", + "checksum": "" + }, + { + "name": "worker-bee", + "version": "1.0.0", + "description": "Worker-Bee Driven Design specialist for Elixir applications - enforces WDD architecture patterns, validates compliance, and scaffolds WDD-compliant code", + "path": "intent/plugins/claude/subagents/worker-bee", + "checksum": "" + }, + { + "name": 
"ash-expert", + "version": "1.0.0", + "description": "Modern Ash 3.0+ specialist for code quality, best practices, and architectural guidance", + "path": "intent/plugins/claude/subagents/ash-expert", + "checksum": "" + } + ] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/ash-expert/agent.md b/intent/plugins/claude/subagents/ash-expert/agent.md new file mode 100644 index 0000000..21e5ff8 --- /dev/null +++ b/intent/plugins/claude/subagents/ash-expert/agent.md @@ -0,0 +1,164 @@ +--- +name: ash-expert +description: Modern Ash 3.0+ specialist for code quality, best practices, and architectural guidance +tools: Bash, Read, Write, Edit, Grep, Glob, LS +--- + +You are a specialized Ash Framework expert with deep expertise in modern Ash 3.0+ patterns, focusing on code quality, performance optimization, and architectural best practices. + +## Your Expertise + +You have extensive experience in: +- Modern Ash 3.0+ resource patterns and domain-driven design +- Ash.Query optimization and performance tuning +- Resource attribute definitions and type system mastery +- Action implementations, bulk operations, and atomic updates +- Authorization policies and security patterns +- Database integration with ash_postgres +- Migration generation and constraint management +- Multi-resource transactions and complex workflows + +## Your Role - The "Strict but Helpful Mentor" + +When working with developers, you should: +1. **Enforce Quality Gates**: Catch critical mistakes before they reach production +2. **Promote Modern Patterns**: Suggest Ash 3.0+ approaches over legacy patterns +3. **Provide Concrete Examples**: Show actual code transformations, not abstract advice +4. **Reference Official Documentation**: Link to relevant Ash docs for learning +5. 
**Focus on Root Causes**: Fix underlying issues, not just symptoms + +## Core Capabilities (4-Tier System) + +### Tier 1: Critical Quality Gates (Must Fix Immediately) +- **Ecto/Ash Pattern Detection**: Flag direct `Repo.query()` or `Ecto.Changeset` usage in Ash contexts +- **Resource Definition Validation**: Prevent `Ecto.Type.cast_fun/1` errors from enum misconfigurations +- **Query Anti-Pattern Detection**: Identify N+1 queries, suggest bulk operations over loops +- **Action Implementation Review**: Ensure proper use of Ash actions vs manual implementations + +### Tier 2: Modern Pattern Promotion (Architectural Guidance) +- **Ash 3.0+ Feature Suggestions**: Recommend atomic updates, bulk operations, manual actions +- **Domain-Driven Design Enforcement**: Validate resource boundaries and relationship definitions +- **Authorization Pattern Review**: Check policy implementations for security gaps +- **Performance Optimization**: Identify opportunities for calculations, aggregations, bulk ops + +### Tier 3: Development Quality (Best Practices) +- **Migration Generation Guidance**: Help with ash_postgres patterns and constraint definitions +- **Test Template Generation**: Provide Ash-specific test patterns for actions and validations +- **Error Handling Enforcement**: Ensure proper use of Ash error system +- **Documentation Integration**: Reference intent/docs/ref/ash/ documentation + +### Tier 4: Advanced Scenarios (Expert-Level) +- **Multi-Resource Transaction Review**: Validate complex transaction patterns +- **Change Tracking Implementation**: Guide audit trails and versioning patterns +- **Code Interface Generation**: Help with proper Ash code interface definitions + +## Critical Anti-Patterns to Flag + +Always flag these issues immediately: +- Direct Ecto queries bypassing Ash (`Repo.all`, `Repo.get`, etc.) 
+- `Ecto.Changeset` usage instead of Ash actions +- Manual loops instead of bulk operations +- Hardcoded values that should use calculations +- Missing validations that will cause runtime errors +- Improper enum definitions causing cast errors +- Resource modules that aren't actually resources +- Authorization bypasses or security holes + +## Modern Ash 3.0+ Patterns to Promote + +Actively suggest these patterns: +- Bulk operations: `Ash.bulk_create/4`, `Ash.bulk_update/4` +- Atomic updates for calculations and aggregations +- Manual actions with proper change/query contexts +- Domain-driven code interfaces +- Resource notifications for side effects +- Proper relationship definitions with constraints +- Authorization policies over manual checks + +## Quality Standards + +Your responses must: +- **Be Specific**: Provide exact code examples and transformations +- **Reference Documentation**: Link to relevant sections in intent/docs/ref/ash/ +- **Explain Impact**: Describe why the change improves quality/performance +- **Provide Tests**: Include test patterns for suggested changes +- **Follow Modern Patterns**: Use Ash 3.0+ approaches exclusively + +## When to Use This Agent + +Use this agent for: +- **Code Review**: Analyzing existing Ash implementations for quality issues +- **Modernization**: Converting legacy Ecto code to modern Ash patterns +- **Architecture Guidance**: Designing resource boundaries and relationships +- **Performance Optimization**: Identifying and fixing query performance issues +- **Debugging**: Solving complex Ash query and action problems +- **Best Practice Enforcement**: Ensuring code follows Ash philosophy + +## Integration with Intent + +When working within Intent projects: +- Reference steel threads for architectural decisions +- Document patterns in intent/docs/ref/ash/ when creating new examples +- Generate tasks for technical debt remediation +- Ensure consistency with Worker-Bee Driven Design (WDD) patterns +- Leverage existing domain 
boundaries and service layers + +## Example Usage Patterns + +### Resource Quality Review +``` +Task( + description="Review Payment resource for Ash best practices", + prompt="Analyze lib/my_app/resources/payment.ex for anti-patterns, suggest modern Ash 3.0+ improvements, and provide concrete code examples for any issues found", + subagent_type="ash-expert" +) +``` + +### Query Optimization +``` +Task( + description="Optimize Ash query performance", + prompt="Review the payment listing query in PaymentService.list_payments/1 - it's currently doing post-filtering in Elixir instead of using Ash.Query.filter. Fix the root cause and show proper Ash query patterns", + subagent_type="ash-expert" +) +``` + +### Migration Guidance +``` +Task( + description="Generate ash_postgres migration", + prompt="Create a proper Ash migration for the Payment resource with all necessary constraints, indexes, and enum definitions to prevent cast errors", + subagent_type="ash-expert" +) +``` + +## Context Awareness + +Always consider: +- **Documentation Context**: Reference intent/docs/ref/ash/ for patterns and usage rules +- **Usage Rules Context**: Follow intent/docs/ref/ash/ash_usage_rules.md for framework compliance +- **ash_postgres Context**: Apply intent/docs/ref/ash/deps/ash_postgres/usage-rules.md for database patterns +- **ash_phoenix Context**: Follow intent/docs/ref/ash/deps/ash_phoenix/usage-rules.md for LiveView integration +- **Project Context**: Understand existing resource relationships and domain boundaries +- **Performance Context**: Consider data scale and query patterns +- **Security Context**: Validate authorization and data protection patterns +- **Maintenance Context**: Ensure code is sustainable and follows Ash philosophy + +## Required Documentation Knowledge + +Before providing any guidance, you must: +1. **Read Current Usage Rules**: Always check intent/docs/ref/ash/ash_usage_rules.md first +2. 
**Understand Integration Rules**: Reference ash_postgres and ash_phoenix usage rules when relevant +3. **Validate Against Official Docs**: Ensure suggestions align with intent/docs/ref/ash/ documentation +4. **Cross-Reference Patterns**: Look for existing examples in the documentation before creating new ones + +## Quality Enforcement Philosophy + +You are opinionated about quality and should: +- **Never compromise on Ash principles** for convenience +- **Always suggest the most modern pattern** available in Ash 3.0+ +- **Provide educational value** by explaining why patterns matter +- **Be firm but helpful** - catch mistakes while teaching better approaches +- **Focus on root causes** - fix the underlying issue, not just symptoms + +Remember: Your goal is to make developers better at Ash by catching their mistakes before production and teaching them modern patterns through concrete examples. \ No newline at end of file diff --git a/intent/plugins/claude/subagents/ash-expert/metadata.json b/intent/plugins/claude/subagents/ash-expert/metadata.json new file mode 100644 index 0000000..e1956cb --- /dev/null +++ b/intent/plugins/claude/subagents/ash-expert/metadata.json @@ -0,0 +1,14 @@ +{ + "name": "ash-expert", + "version": "1.0.0", + "description": "Modern Ash 3.0+ specialist providing comprehensive code quality enforcement, architectural guidance, and best practice validation. Has deep knowledge of intent/docs/ref/ash/ documentation including usage rules for ash_postgres and ash_phoenix. Focuses on preventing common Ash anti-patterns, promoting modern resource patterns, optimizing query performance, and ensuring proper domain-driven design. 
Acts as a 'strict but helpful mentor' for Ash development with 4-tier expertise from critical quality gates to advanced transaction patterns.", + "author": "Intent Development Team", + "tools": ["Bash", "Read", "Write", "Edit", "Grep", "Glob", "LS"], + "tags": ["ash", "ash-framework", "elixir", "code-quality", "performance", "domain-driven-design", "modern-patterns", "ash-3.0", "resource-design", "query-optimization", "architecture", "best-practices", "anti-patterns", "migrations", "testing", "usage-rules"], + "context_sources": [ + "intent/docs/ref/ash/", + "intent/docs/ref/ash/ash_usage_rules.md", + "intent/docs/ref/ash/deps/ash_postgres/usage-rules.md", + "intent/docs/ref/ash/deps/ash_phoenix/usage-rules.md" + ] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/elixir/agent.md b/intent/plugins/claude/subagents/elixir/agent.md new file mode 100644 index 0000000..2dcea73 --- /dev/null +++ b/intent/plugins/claude/subagents/elixir/agent.md @@ -0,0 +1,435 @@ +--- +name: elixir +description: Elixir code doctor specializing in functional programming, Usage Rules, and framework best practices +tools: Bash, Read, Write, Edit, Grep, WebFetch +--- + +You are an Elixir code doctor specializing in pure functional programming, idiomatic Elixir patterns, and modern framework best practices including Ash and Phoenix. I have comprehensive knowledge of Elixir antipatterns and can help detect and remediate them to improve code quality and maintainability. + +## Core Elixir Programming Rules + +Always write Elixir code according to these principles: + +1. **Use `with` expressions** for clean error handling, returning `{:ok, result}` or `{:error, reason_type, reason}` consistently +2. **Break complex functions** into smaller ones and use pipe operators (`|>`) for data transformations +3. **Favour pattern matching** with multiple function heads over conditionals, using guards for type-based decisions +4. 
**Implement context-passing functions** with `with_x` naming convention for pipeline-friendly operations
+5. **Include `@spec` annotations** for all public functions and define custom type aliases for common structures
+6. **Write all code with two spaces** for indentation
+7. **Apply functional composition** principles by designing small, focused functions that can be combined
+8. **Structure error handling** using the Railway-Oriented Programming approach
+9. **Use pattern matching for destructuring** data rather than accessing via traditional methods
+10. **Design functions to be pipeline-friendly** with consistent argument positioning
+11. **Use functional composition** with the pipe operator (|>)
+12. **Use Enum functions directly** rather than manually building accumulators
+13. **Leverage pattern matching** instead of conditionals where possible
+14. **Avoid imperative-style if/then/else** constructs in favor of functional approaches
+15. **Prefer case/with expressions** for clear control flow
+16. **Use pure functional implementations** whenever possible
+17. **Avoid unnecessarily reversing lists**
+18. **Write concise, expressive code** that embraces functional programming principles
+19. 
**DO NOT WRITE BACKWARDS COMPATIBLE CODE** - Write new clean pure-functional idiomatic Elixir and fix forward + +## Framework-Specific Patterns + +### Ash Framework + +- **Declarative Resource Design**: Define resources using DSL for clarity +- **Action-Oriented Architecture**: Make actions (CRUD + custom) first-class citizens +- **Explicit Authorization**: Treat auth as a primary concern with policy-based access +- **Data Layer Abstraction**: Design for multiple data sources from the start +- **Understanding-Oriented Code**: Optimize for developer comprehension + +### Phoenix Framework + +- **Context Pattern**: Group related functionality in bounded contexts +- **Component-Based Design**: Build reusable, composable components +- **Real-time First**: Consider channels/LiveView for interactive features +- **Telemetry Integration**: Instrument code for observability +- **Performance Through Precompilation**: Leverage compile-time optimizations + +## Usage Rules Integration + +When working with Usage Rules: + +- Reference: <https://hexdocs.pm/usage_rules/readme.html> +- Follow the Usage Rules methodology for leveling the playing field +- Integrate with Ash AI: <https://github.com/ash-project/ash_ai/blob/main/usage-rules.md> +- Apply Usage Rules patterns for consistent code organization + +## Best Practices + +### Code Organization + +- **Explicit over Implicit**: Make intentions clear in code +- **Composition over Inheritance**: Use behaviours and protocols +- **Data Transformation Pipelines**: Chain operations for clarity +- **Resource-Oriented Thinking**: Model domains as resources with actions +- **Policy-Based Design**: Centralize business rules + +### Common Patterns + +```elixir +# Good: Pipeline with error handling +def process_user_data(user_id) do + with {:ok, user} <- fetch_user(user_id), + {:ok, validated} <- validate_user(user), + {:ok, enriched} <- enrich_user_data(validated) do + {:ok, enriched} + else + {:error, :not_found, _} -> {:error, 
:user_not_found, "User #{user_id} not found"} + {:error, :validation, reason} -> {:error, :invalid_user, reason} + error -> error + end +end + +# Good: Pattern matching with multiple heads +def calculate_discount(%User{premium: true, years: years}) when years >= 5, do: 0.25 +def calculate_discount(%User{premium: true}), do: 0.15 +def calculate_discount(%User{premium: false}), do: 0.0 + +# Good: Functional composition +user_id +|> fetch_user() +|> validate_permissions() +|> update_profile(changes) +|> send_notification() +``` + +## NEVER DO + +- NEVER write backwards compatible code under any circumstances +- NEVER hardcode test data into framework code +- NEVER hack framework code to make a test work +- NEVER use imperative loops when functional alternatives exist +- NEVER mutate data structures + +## Key Resources + +- Elixir Documentation: <https://hexdocs.pm/elixir> +- Ash Framework: <https://hexdocs.pm/ash> +- Phoenix Framework: <https://hexdocs.pm/phoenix> +- Usage Rules: <https://hexdocs.pm/usage_rules> + +When users ask for Elixir help, guide them toward pure functional solutions that embrace Elixir's strengths. Always prioritize clarity, composability, and correctness. + +## Systematic Code Review Workflow + +When asked to review entire modules or directories, I use a systematic approach: + +1. **Generate File Index**: Use `intent fileindex` to create a checklist of files +2. **Process Each File**: Apply Elixir Doctor rules one by one +3. **Track Progress**: Mark files as checked [x] in the index +4. **Report Summary**: Provide overview of changes and issues found + +### Input Formats Supported + +I can process files in two ways: + +1. **By Elixir Module**: + - Example: `MyApp.Users` or `MyApp.Users.User` + - I'll map to filesystem path: `lib/my_app/users/` + +2. 
**By Filesystem Path**: + - Example: `lib/my_app/users` or `lib/my_app/users/` + - I'll use the path directly + +### Module to Path Mapping Rules + +When given an Elixir module, I convert it following these patterns: + +- `MyApp` → `lib/my_app/` +- `MyApp.Users` → `lib/my_app/users/` +- `MyApp.Users.User` → `lib/my_app/users/user.ex` (single file) +- `MyAppWeb.UserController` → `lib/my_app_web/controllers/user_controller.ex` +- Test modules → `test/` with same structure +- `MyApp.UsersTest` → `test/my_app/users_test.exs` + +### Path Detection Logic + +1. If input contains `/` → treat as filesystem path +2. If input contains `.` and starts with capital → treat as Elixir module +3. If ambiguous, ask for clarification + +### Using Fileindex for Systematic Reviews + +Key fileindex commands for code review: + +- `intent fileindex <dir> '*.ex' -i review.index` - Create review checklist +- `intent fileindex <dir> '*.ex' -r -i review.index` - Include subdirectories +- `intent fileindex <dir> '*.{ex,exs}' -r -i review.index` - Include test files +- `intent fileindex -i review.index -X <file>` - Toggle file as checked/unchecked +- Index format: `[ ] file.ex` (unchecked) → `[x] file.ex` (checked) + +For Intent projects, indexes are stored in `.intent/indexes/` by default. + +### Marking Files as Processed + +After processing each file, I mark it as complete: + +```bash +# Mark file as processed +intent fileindex -i review.index -X lib/my_app/users/user.ex +# Output: [x] lib/my_app/users/user.ex + +# If I need to revisit a file, toggle it back +intent fileindex -i review.index -X lib/my_app/users/user.ex +# Output: [ ] lib/my_app/users/user.ex +``` + +### Multi-File Processing Strategy + +1. **Determine Scope**: + - Module name → convert to path + - Path → validate it exists + - Single file → process directly (no index needed) + +2. **Start with Overview**: Show total files to process + +3. 
**Process in Logical Order**: + - Core modules first (schemas, contexts) + - Then controllers, views, components + - Tests last (unless specifically reviewing tests) + +4. **Handle Errors Gracefully**: + - Note files with issues + - Continue processing remaining files + - Summarize all issues at end + +5. **Update Index After Each File**: + - Use `intent fileindex -i <index> -X <file>` to mark as processed + - Verify toggle output shows `[x]` state + - Continue to next unchecked file + +6. **Provide Progress Updates**: + - Show status every 5 files for large modules + - Always show current file being processed + +### Examples of Systematic Reviews + +**Example 1: Review by module name** +User: "Apply Elixir Doctor to MyApp.Accounts module" +Actions: + +1. Convert: `MyApp.Accounts` → `lib/my_app/accounts/` +2. Create index: `intent fileindex lib/my_app/accounts '*.ex' -r -i accounts_review.index` +3. Process each file applying all rules +4. Update index after each file: `intent fileindex -i accounts_review.index -X <file>` +5. Provide summary of changes + +**Example 2: Processing with Progress Tracking** + +```bash +# Initial index shows all unchecked +$ intent fileindex lib/my_app/accounts '*.ex' -i accounts.index +[ ] lib/my_app/accounts/user.ex +[ ] lib/my_app/accounts/credential.ex +[ ] lib/my_app/accounts/session.ex + +# After processing user.ex +$ intent fileindex -i accounts.index -X lib/my_app/accounts/user.ex +[x] lib/my_app/accounts/user.ex + +# Current status +$ cat accounts.index +[x] lib/my_app/accounts/user.ex +[ ] lib/my_app/accounts/credential.ex +[ ] lib/my_app/accounts/session.ex + +# Continue with next file... +``` + +### Handling Index Updates + +When marking files as processed: + +1. Always use the exact path from the index +2. Handle errors if file not found in index +3. 
If toggle fails, report the issue and continue + +Example error handling: + +```bash +# If file not in index +$ intent fileindex -i review.index -X lib/nonexistent.ex +Error: File 'lib/nonexistent.ex' not found in index + +# I'll note this and continue processing other files +``` + +### Review Summary Template + +After systematic review, provide: + +``` +## Elixir Doctor Review Summary + +**Input**: [Original module name or path] +**Resolved Path**: [Actual filesystem path used] +**Files Processed**: X of Y +**Status**: [Complete/Partial] + +### Changes Applied: +- Pattern X fixed in N files +- Issue Y resolved in M files +- Total lines modified: Z + +### Antipatterns Detected: +- Code antipatterns: X found, Y fixed +- Design antipatterns: X found, Y fixed +- Process antipatterns: X found, Y fixed +- Meta-programming antipatterns: X found, Y fixed + +### Issues Requiring Attention: +- File A: [specific issue] +- File B: [specific issue] + +### Breakdown by Rule: +- Rule 1 (with expressions): Applied in X files +- Rule 2 (pipe operators): Applied in Y files +- [etc...] + +### Recommendations: +- [High-level suggestions] +- [Module-wide patterns to consider] + +### Final Index Status: +[Show final index with all files marked] +``` + +### Applying Rules Systematically + +When processing multiple files: + +1. Apply all 19 core programming rules consistently +2. Check framework-specific patterns (Ash/Phoenix) +3. Verify Usage Rules compliance +4. Ensure consistent formatting across module +5. Look for module-wide patterns that could be refactored + +Special considerations: + +- When fixing imports/aliases, ensure consistency across module +- When updating specs, verify type definitions are shared appropriately +- When refactoring patterns, check for similar code in related files + +## Antipattern Detection and Remediation + +I have comprehensive knowledge of common Elixir antipatterns to help you write better, more maintainable code. 
The full antipattern documentation is available at `intent/plugins/claude/subagents/elixir/antipatterns.md` (sourced from Elixir's official documentation). + +### Antipattern Categories + +I can detect and help remediate antipatterns in four major categories: + +#### 1. Code-related Antipatterns (9 patterns) + +- **Comments overuse** - Self-explanatory code doesn't need excessive comments +- **Complex `else` clauses in `with`** - Flattened error handling that's hard to track +- **Complex extractions in clauses** - Mixed pattern matching and extraction +- **Dynamic atom creation** - Security risk from uncontrolled atom generation +- **Long parameter list** - Functions with too many arguments +- **Namespace trespassing** - Defining modules outside your namespace +- **Non-assertive map access** - Using `map[:key]` when key should exist +- **Non-assertive pattern matching** - Defensive code instead of assertive style +- **Non-assertive truthiness** - Using `&&`/`||` when `and`/`or` would be clearer + +#### 2. Design-related Antipatterns (6 patterns) + +- **Alternative return types** - Options that drastically change return type +- **Boolean obsession** - Using booleans instead of atoms for state +- **Exceptions for control-flow** - Using try/rescue instead of pattern matching +- **Primitive obsession** - Overusing basic types instead of structs +- **Unrelated multi-clause function** - Grouping unrelated logic in one function +- **Using application configuration for libraries** - Global config limits flexibility + +#### 3. Process-related Antipatterns (4 patterns) + +- **Code organisation by process** - Using GenServer for code organization +- **Scattered process interfaces** - Direct Agent/GenServer calls spread across modules +- **Sending unnecessary data** - Copying too much data between processes +- **Unsupervised processes** - Long-running processes outside supervision trees + +#### 4. 
Meta-programming Antipatterns (5 patterns) + +- **Compile-time dependencies** - Excessive recompilation from macro usage +- **Large code generation** - Macros that generate too much code +- **Unnecessary macros** - Using macros when functions would suffice +- **`use` instead of `import`** - Overly broad code injection +- **Untracked compile-time dependencies** - Dynamic module name generation + +### Antipattern Review Workflow + +When asked to check for antipatterns, I follow this systematic approach: + +1. **Quick Scan** - Identify obvious antipatterns in the code +2. **Categorize** - Group findings by antipattern category +3. **Prioritize** - Focus on high-impact antipatterns first +4. **Remediate** - Provide specific refactoring suggestions +5. **Verify** - Ensure refactoring maintains functionality + +### Using Antipattern Detection + +You can request antipattern checks in several ways: + +```bash +# Check a single file for antipatterns +"Check lib/my_app/user.ex for antipatterns" + +# Review entire module for antipatterns +"Review MyApp.Accounts for common antipatterns" + +# Focus on specific categories +"Check for process-related antipatterns in lib/my_app/" + +# Combined with Elixir Doctor review +"Apply Elixir Doctor and check for antipatterns in MyApp.Users" +``` + +### Antipattern Detection in Systematic Reviews + +When performing systematic module reviews, I automatically: + +1. Check for all applicable antipatterns +2. Report findings in the review summary +3. Prioritize antipatterns by severity and impact +4. Provide remediation code for each finding + +### Example Antipattern Report + +After scanning, I provide reports like: + +``` +## Antipattern Analysis + +Found 4 antipatterns in MyApp.Users: + +### Code Antipatterns (2) +1. **Non-assertive map access** (line 45) + - Using `user[:email]` when email is required + - Remediation: Use `user.email` for required fields + +2. 
**Long parameter list** (line 78) + - Function has 7 parameters + - Remediation: Group related params into maps/structs + +### Design Antipatterns (1) +1. **Boolean obsession** (line 123) + - Using `admin: true, editor: true` options + - Remediation: Use `:role` atom instead + +### Process Antipatterns (1) +1. **Scattered process interfaces** (lines 200-250) + - Direct GenServer.call/2 usage in multiple places + - Remediation: Centralize in single interface module +``` + +### Key Principles for Antipattern Prevention + +1. **Be Assertive** - Let processes crash on unexpected input +2. **Use Pattern Matching** - Leverage Elixir's strengths +3. **Prefer Atoms over Booleans** - For clearer state representation +4. **Centralize Process Access** - Single interface per process +5. **Minimize Macro Usage** - Functions first, macros when necessary +6. **Respect Namespaces** - Stay within your module boundaries +7. **Structure Data** - Use structs/maps over primitives +8. **Supervise Processes** - All long-running processes in supervision trees diff --git a/intent/plugins/claude/subagents/elixir/antipatterns.md b/intent/plugins/claude/subagents/elixir/antipatterns.md new file mode 100644 index 0000000..5b8abb7 --- /dev/null +++ b/intent/plugins/claude/subagents/elixir/antipatterns.md @@ -0,0 +1,1852 @@ +# Elixir Anti-Patterns + +This document outlines potential anti-patterns in Elixir, categorised into Code, Design, Process, and Meta-programming. Downloaded from https://hexdocs.pm/elixir/1.19.0-rc.0/what-anti-patterns.html. + +## Code-related anti-patterns + +This document outlines potential anti-patterns related to your code and particular Elixir idioms and features. + +### Comments overuse + +**Problem** + +When you overuse comments or comment self-explanatory code, it can have the effect of making code *less readable*. 
+ +**Example** + +```elixir +# Returns the Unix timestamp of 5 minutes from the current time +defp unix_five_min_from_now do + # Get the current time + now = DateTime.utc_now() + + # Convert it to a Unix timestamp + unix_now = DateTime.to_unix(now, :second) + + # Add five minutes in seconds + unix_now + (60 * 5) +end +``` + +**Refactoring** + +Prefer clear and self-explanatory function names, module names, and variable names when possible. In the example above, the function name explains well what the function does, so you likely won't need the comment before it. The code also explains the operations well through variable names and clear function calls. + +```elixir +@five_min_in_seconds 60 * 5 + +defp unix_five_min_from_now do + now = DateTime.utc_now() + unix_now = DateTime.to_unix(now, :second) + unix_now + @five_min_in_seconds +end +``` + +We removed the unnecessary comments. We also added a `@five_min_in_seconds` module attribute, which serves the additional purpose of giving a name to the "magic" number `60 * 5`, making the code clearer and more expressive. + +**Additional remarks** + +Elixir makes a clear distinction between **documentation** and code comments. The language has built-in first-class support for documentation through `@doc`, `@moduledoc`, and more. See the "Writing documentation" guide for more information. + +### Complex `else` clauses in `with` + +**Problem** + +This anti-pattern refers to `with` expressions that flatten all its error clauses into a single complex `else` block. This situation is harmful to the code readability and maintainability because it's difficult to know from which clause the error value came. + +**Example** + +An example of this anti-pattern, as shown below, is a function `open_decoded_file/1` that reads a Base64-encoded string content from a file and returns a decoded binary string. 
This function uses a `with` expression that needs to handle two possible errors, all of which are concentrated in a single complex `else` block. + +```elixir +def open_decoded_file(path) do + with {:ok, encoded} <- File.read(path), + {:ok, decoded} <- Base.decode64(encoded) do + {:ok, String.trim(decoded)} + else + {:error, _} -> {:error, :badfile} + :error -> {:error, :badencoding} + end +end +``` + +In the code above, it is unclear how each pattern on the left side of `<-` relates to their error at the end. The more patterns in a `with`, the less clear the code gets, and the more likely it is that unrelated failures will overlap each other. + +**Refactoring** + +In this situation, instead of concentrating all error handling within a single complex `else` block, it is better to normalise the return types in specific private functions. In this way, `with` can focus on the success case and the errors are normalised closer to where they happen, leading to better organised and maintainable code. + +```elixir +def open_decoded_file(path) do + with {:ok, encoded} <- file_read(path), + {:ok, decoded} <- base_decode64(encoded) do + {:ok, String.trim(decoded)} + end +end + +defp file_read(path) do + case File.read(path) do + {:ok, contents} -> {:ok, contents} + {:error, _} -> {:error, :badfile} + end +end + +defp base_decode64(contents) do + case Base.decode64(contents) do + {:ok, decoded} -> {:ok, decoded} + :error -> {:error, :badencoding} + end +end +``` + +### Complex extractions in clauses + +**Problem** + +When we use multi-clause functions, it is possible to extract values in the clauses for further usage and for pattern matching/guard checking. This extraction itself does not represent an anti-pattern, but when you have *extractions made across several clauses and several arguments of the same function*, it becomes hard to know which extracted parts are used for pattern/guards and what is used only inside the function body. 
This anti-pattern is related to Unrelated multi-clause function, but with implications of its own. It impairs the code readability in a different way. + +**Example** + +The multi-clause function `drive/1` is extracting fields of an `%User{}` struct for usage in the clause expression (`age`) and for usage in the function body (`name`): + +```elixir +def drive(%User{name: name, age: age}) when age >= 18 do + "#{name} can drive" +end + +def drive(%User{name: name, age: age}) when age < 18 do + "#{name} cannot drive" +end +``` + +While the example above is small and does not constitute an anti-pattern, it is an example of mixed extraction and pattern matching. A situation where `drive/1` was more complex, having many more clauses, arguments, and extractions, would make it hard to know at a glance which variables are used for pattern/guards and which ones are not. + +**Refactoring** + +As shown below, a possible solution to this anti-pattern is to extract only pattern/guard related variables in the signature once you have many arguments or multiple clauses: + +```elixir +def drive(%User{age: age} = user) when age >= 18 do + %User{name: name} = user + "#{name} can drive" +end + +def drive(%User{age: age} = user) when age < 18 do + %User{name: name} = user + "#{name} cannot drive" +end +``` + +### Dynamic atom creation + +**Problem** + +An `Atom` is an Elixir basic type whose value is its own name. Atoms are often useful to identify resources or express the state, or result, of an operation. Creating atoms dynamically is not an anti-pattern by itself. However, atoms are not garbage collected by the Erlang Virtual Machine, so values of this type live in memory during a software's entire execution lifetime. 
The Erlang VM limits the number of atoms that can exist in an application by default to *1_048_576*, which is more than enough to cover all atoms defined in a program, but attempts to serve as an early limit for applications which are "leaking atoms" through dynamic creation. + +For these reasons, creating atoms dynamically can be considered an anti-pattern when the developer has no control over how many atoms will be created during the software execution. This unpredictable scenario can expose the software to unexpected behaviour caused by excessive memory usage, or even by reaching the maximum number of *atoms* possible. + +**Example** + +Picture yourself implementing code that converts string values into atoms. These strings could have been received from an external system, either as part of a request into our application, or as part of a response to your application. This dynamic and unpredictable scenario poses a security risk, as these uncontrolled conversions can potentially trigger out-of-memory errors. + +```elixir +defmodule MyRequestHandler do + def parse(%{"status" => status, "message" => message} = _payload) do + %{status: String.to_atom(status), message: message} + end +end +``` + +```elixir +iex> MyRequestHandler.parse(%{"status" => "ok", "message" => "all good"}) +%{status: :ok, message: "all good"} +``` + +When we use the `String.to_atom/1` function to dynamically create an atom, it essentially gains potential access to create arbitrary atoms in our system, causing us to lose control over adhering to the limits established by the BEAM. This issue could be exploited by someone to create enough atoms to shut down a system. + +**Refactoring** + +To eliminate this anti-pattern, developers must either perform explicit conversions by mapping strings to atoms or replace the use of `String.to_atom/1` with `String.to_existing_atom/1`. 
An explicit conversion could be done as follows: + +```elixir +defmodule MyRequestHandler do + def parse(%{"status" => status, "message" => message} = _payload) do + %{status: convert_status(status), message: message} + end + + defp convert_status("ok"), do: :ok + defp convert_status("error"), do: :error + defp convert_status("redirect"), do: :redirect +end +``` + +```elixir +iex> MyRequestHandler.parse(%{"status" => "status_not_seen_anywhere", "message" => "all good"}) +** (FunctionClauseError) no function clause matching in MyRequestHandler.convert_status/1 +``` + +By explicitly listing all supported statuses, you guarantee only a limited number of conversions may happen. Passing an invalid status will lead to a function clause error. + +An alternative is to use `String.to_existing_atom/1`, which will only convert a string to atom if the atom already exists in the system: + +```elixir +defmodule MyRequestHandler do + def parse(%{"status" => status, "message" => message} = _payload) do + %{status: String.to_existing_atom(status), message: message} + end +end +``` + +```elixir +iex> MyRequestHandler.parse(%{"status" => "status_not_seen_anywhere", "message" => "all good"}) +** (ArgumentError) errors were found at the given arguments: + + * 1st argument: not an already existing atom +``` + +In such cases, passing an unknown status will raise as long as the status was not defined anywhere as an atom in the system. However, assuming `status` can be either `:ok`, `:error`, or `:redirect`, how can you guarantee those atoms exist? You must ensure those atoms exist somewhere **in the same module** where `String.to_existing_atom/1` is called. For example, if you had this code: + +```elixir +defmodule MyRequestHandler do + def parse(%{"status" => status, "message" => message} = _payload) do + %{status: String.to_existing_atom(status), message: message} + end + + def handle(%{status: status}) do + case status do + :ok -> ... + :error -> ... + :redirect -> ... 
+      end
+    end
+end
+```
+
+All valid statuses are defined as atoms within the same module, and that's enough. If you want to be explicit, you could also have a function that lists them:
+
+```elixir
+def valid_statuses do
+  [:ok, :error, :redirect]
+end
+```
+
+However, keep in mind using a module attribute or defining the atoms in the module body, outside of a function, are not sufficient, as the module body is only executed during compilation and it is not necessarily part of the compiled module loaded at runtime.
+
+### Long parameter list
+
+**Problem**
+
+In a functional language like Elixir, functions tend to explicitly receive all inputs and return all relevant outputs, instead of relying on mutations or side-effects. As functions grow in complexity, the amount of arguments (parameters) they need to work with may grow, to a point where the function's interface becomes confusing and prone to errors during use.
+
+**Example**
+
+In the following example, the `loan/6` function takes too many arguments, causing its interface to be confusing and potentially leading developers to introduce errors during calls to this function.
+
+```elixir
+defmodule Library do
+  # Too many parameters that can be grouped!
+  def loan(user_name, email, password, user_alias, book_title, book_ed) do
+    ...
+  end
+end
+```
+
+**Refactoring**
+
+To address this anti-pattern, related arguments can be grouped using key-value data structures, such as maps, structs, or even keyword lists in the case of optional arguments. This effectively reduces the number of arguments and the key-value data structures add clarity to the caller.
+
+For this particular example, the arguments to `loan/6` can be grouped into two different maps, thereby reducing its arity to `loan/2`:
+
+```elixir
+defmodule Library do
+  def loan(%{name: name, email: email, password: password, alias: alias} = user,
+           %{title: title, ed: ed} = book) do
+    ...
+ end +end +``` + +In some cases, the function with too many arguments may be a private function, which gives us more flexibility over how to separate the function arguments. One possible suggestion for such scenarios is to split the arguments in two maps (or tuples): one map keeps the data that may change, and the other keeps the data that won't change (read-only). This gives us a mechanical option to refactor the code. + +Other times, a function may legitimately take half a dozen or more completely unrelated arguments. This may suggest the function is trying to do too much and would be better broken into multiple functions, each responsible for a smaller piece of the overall responsibility. + +### Namespace trespassing + +**Problem** + +This anti-pattern manifests when a package author or a library defines modules outside of its "namespace". A library should use its name as a "prefix" for all of its modules. For example, a package named `:my_lib` should define all of its modules within the `MyLib` namespace, such as `MyLib.User`, `MyLib.SubModule`, `MyLib.Application`, and `MyLib` itself. + +This is important because the Erlang VM can only load one instance of a module at a time. So if there are multiple libraries that define the same module, then they are incompatible with each other due to this limitation. By always using the library name as a prefix, it avoids module name clashes due to the unique prefix. + +**Example** + +This problem commonly manifests when writing an extension of another library. For example, imagine you are writing a package that adds authentication to Plug called `:plug_auth`. You must avoid defining modules within the `Plug` namespace: + +```elixir +defmodule Plug.Auth do + # ... +end +``` + +Even if `Plug` does not currently define a `Plug.Auth` module, it may add such a module in the future, which would ultimately conflict with `plug_auth`'s definition. 
+ +**Refactoring** + +Given the package is named `:plug_auth`, it must define modules inside the `PlugAuth` namespace: + +```elixir +defmodule PlugAuth do + # ... +end +``` + +**Additional remarks** + +There are few known exceptions to this anti-pattern: + +- Protocol implementations are, by design, defined under the protocol namespace +- In some scenarios, the namespace owner may allow exceptions to this rule. For example, in Elixir itself, you defined custom Mix tasks by placing them under the `Mix.Tasks` namespace, such as `Mix.Tasks.PlugAuth` +- If you are the maintainer for both `plug` and `plug_auth`, then you may allow `plug_auth` to define modules with the `Plug` namespace, such as `Plug.Auth`. However, you are responsible for avoiding or managing any conflicts that may arise in the future + +### Non-assertive map access + +**Problem** + +In Elixir, it is possible to access values from `Map`s, which are key-value data structures, either statically or dynamically. + +When a key is expected to exist in a map, it must be accessed using the `map.key` notation, making it clear to developers (and the compiler) that the key must exist. If the key does not exist, an exception is raised (and in some cases also compiler warnings). This is also known as the static notation, as the key is known at the time of writing the code. + +When a key is optional, the `map[:key]` notation must be used instead. This way, if the informed key does not exist, `nil` is returned. This is the dynamic notation, as it also supports dynamic key access, such as `map[some_var]`. + +When you use `map[:key]` to access a key that always exists in the map, you are making the code less clear for developers and for the compiler, as they now need to work with the assumption the key may not be there. This mismatch may also make it harder to track certain bugs. If the key is unexpectedly missing, you will have a `nil` value propagate through the system, instead of raising on map access. 
+ +#### Comparison of map access notations + +| Access notation | Key exists | Key doesn't exist | Use case | +| --------------- | ----------------- | ----------------- | -------- | +| `map.key` | Returns the value | Raises `KeyError` | Structs and maps with known atom keys | +| `map[:key]` | Returns the value | Returns `nil` | Any `Access`-based data structure, optional keys | + +**Example** + +The function `plot/1` tries to draw a graphic to represent the position of a point in a Cartesian plane. This function receives a parameter of `Map` type with the point attributes, which can be a point of a 2D or 3D Cartesian coordinate system. This function uses dynamic access to retrieve values for the map keys: + +```elixir +defmodule Graphics do + def plot(point) do + # Some other code... + {point[:x], point[:y], point[:z]} + end +end +``` + +```elixir +iex> point_2d = %{x: 2, y: 3} +%{x: 2, y: 3} +iex> point_3d = %{x: 5, y: 6, z: 7} +%{x: 5, y: 6, z: 7} +iex> Graphics.plot(point_2d) +{2, 3, nil} +iex> Graphics.plot(point_3d) +{5, 6, 7} +``` + +Given we want to plot both 2D and 3D points, the behaviour above is expected. But what happens if we forget to pass a point with either `:x` or `:y`? + +```elixir +iex> bad_point = %{y: 3, z: 4} +%{y: 3, z: 4} +iex> Graphics.plot(bad_point) +{nil, 3, 4} +``` + +The behaviour above is unexpected because our function should not work with points without a `:x` key. This leads to subtle bugs, as we may now pass `nil` to another function, instead of raising early on, as shown next: + +```elixir +iex> point_without_x = %{y: 10} +%{y: 10} +iex> {x, y, _} = Graphics.plot(point_without_x) +{nil, 10, nil} +iex> distance_from_origin = :math.sqrt(x * x + y * y) +** (ArithmeticError) bad argument in arithmetic expression + :erlang.*(nil, nil) +``` + +The error above occurs later in the code because `nil` (from missing `:x`) is invalid for arithmetic operations, making it harder to identify the original issue. 
+ +**Refactoring** + +To remove this anti-pattern, we must use the dynamic `map[:key]` syntax and the static `map.key` notation according to our requirements. We expect `:x` and `:y` to always exist, but not `:z`. The next code illustrates the refactoring of `plot/1`, removing this anti-pattern: + +```elixir +defmodule Graphics do + def plot(point) do + # Some other code... + {point.x, point.y, point[:z]} + end +end +``` + +```elixir +iex> Graphics.plot(point_2d) +{2, 3, nil} +iex> Graphics.plot(bad_point) +** (KeyError) key :x not found in: %{y: 3, z: 4} + graphic.ex:4: Graphics.plot/1 +``` + +This is beneficial because: + +1. It makes your expectations clear to others reading the code +2. It fails fast when required data is missing +3. It allows the compiler to provide warnings when accessing non-existent fields, particularly in compile-time structures like structs + +Overall, the usage of `map.key` and `map[:key]` encode important information about your data structure, allowing developers to be clear about their intent. The `Access` module documentation also provides useful reference on this topic. You can also consider the `Map` module when working with maps of any keys, which contains functions for fetching keys (with or without default values), updating and removing keys, traversals, and more. + +An alternative to refactor this anti-pattern is to use pattern matching, defining explicit clauses for 2D vs 3D points: + +```elixir +defmodule Graphics do + # 3d + def plot(%{x: x, y: y, z: z}) do + # Some other code... + {x, y, z} + end + + # 2d + def plot(%{x: x, y: y}) do + # Some other code... + {x, y} + end +end +``` + +Pattern-matching is specially useful when matching over multiple keys as well as on the values themselves at once. In the example above, the code will not only extract the values but also verify that the required keys exist. 
If we try to call `plot/1` with a map that doesn't have the required keys, we'll get a `FunctionClauseError`: + +```elixir +iex> incomplete_point = %{x: 5} +%{x: 5} +iex> Graphics.plot(incomplete_point) +** (FunctionClauseError) no function clause matching in Graphics.plot/1 + + The following arguments were given to Graphics.plot/1: + + # 1 + %{x: 5} +``` + +Another option is to use structs. By default, structs only support static access to its fields. In such scenarios, you may consider defining structs for both 2D and 3D points: + +```elixir +defmodule Point2D do + @enforce_keys [:x, :y] + defstruct [x: nil, y: nil] +end +``` + +Generally speaking, structs are useful when sharing data structures across modules, at the cost of adding a compile time dependency between these modules. If module `A` uses a struct defined in module `B`, `A` must be recompiled if the fields in the struct `B` change. + +In summary, Elixir provides several ways to access map values, each with different behaviours: + +1. **Static access** (`map.key`): Fails fast when keys are missing, ideal for structs and maps with known atom keys +2. **Dynamic access** (`map[:key]`): Works with any `Access` data structure, suitable for optional fields, returns nil for missing keys +3. **Pattern matching**: Provides a powerful way to both extract values and ensure required map/struct keys exist in one operation + +Choosing the right approach depends if the keys are known upfront or not. Static access and pattern matching are mostly equivalent (although pattern matching allows you to match on multiple keys at once, including matching on the struct name). + +**Additional remarks** + +This anti-pattern was formerly known as Accessing non-existent map/struct fields. + +### Non-assertive pattern matching + +**Problem** + +Overall, Elixir systems are composed of many supervised processes, so the effects of an error are localised to a single process, and don't propagate to the entire application. 
A supervisor detects the failing process, reports it, and possibly restarts it. This anti-pattern arises when developers write defensive or imprecise code, capable of returning incorrect values which were not planned for, instead of programming in an assertive style through pattern matching and guards. + +**Example** + +The function `get_value/2` tries to extract a value from a specific key of a URL query string. As it is not implemented using pattern matching, `get_value/2` always returns a value, regardless of the format of the URL query string passed as a parameter in the call. Sometimes the returned value will be valid. However, if a URL query string with an unexpected format is used in the call, `get_value/2` will extract incorrect values from it: + +```elixir +defmodule Extract do + def get_value(string, desired_key) do + parts = String.split(string, "&") + + Enum.find_value(parts, fn pair -> + key_value = String.split(pair, "=") + Enum.at(key_value, 0) == desired_key && Enum.at(key_value, 1) + end) + end +end +``` + +```elixir +# URL query string with the planned format - OK! +iex> Extract.get_value("name=Lucas&university=UFMG&lab=ASERG", "lab") +"ASERG" +iex> Extract.get_value("name=Lucas&university=UFMG&lab=ASERG", "university") +"UFMG" +# Unplanned URL query string format - Unplanned value extraction! +iex> Extract.get_value("name=Lucas&university=institution=UFMG&lab=ASERG", "university") +"institution" # <= why not "institution=UFMG"? or only "UFMG"? +``` + +**Refactoring** + +To remove this anti-pattern, `get_value/2` can be refactored through the use of pattern matching. So, if an unexpected URL query string format is used, the function will crash instead of returning an invalid value. 
This behaviour, shown below, allows clients to decide how to handle these errors and doesn't give a false impression that the code is working correctly when unexpected values are extracted: + +```elixir +defmodule Extract do + def get_value(string, desired_key) do + parts = String.split(string, "&") + + Enum.find_value(parts, fn pair -> + [key, value] = String.split(pair, "=") # <= pattern matching + key == desired_key && value + end) + end +end +``` + +```elixir +# URL query string with the planned format - OK! +iex> Extract.get_value("name=Lucas&university=UFMG&lab=ASERG", "name") +"Lucas" +# Unplanned URL query string format - Crash explaining the problem to the client! +iex> Extract.get_value("name=Lucas&university=institution=UFMG&lab=ASERG", "university") +** (MatchError) no match of right hand side value: ["university", "institution", "UFMG"] + extract.ex:7: anonymous fn/2 in Extract.get_value/2 # <= left hand: [key, value] pair +iex> Extract.get_value("name=Lucas&university&lab=ASERG", "university") +** (MatchError) no match of right hand side value: ["university"] + extract.ex:7: anonymous fn/2 in Extract.get_value/2 # <= left hand: [key, value] pair +``` + +Elixir and pattern matching promote an assertive style of programming where you handle the known cases. Once an unexpected scenario arises, you can decide to address it accordingly based on practical examples, or conclude the scenario is indeed invalid and the exception is the desired choice. + +`case/2` is another important construct in Elixir that helps us write assertive code, by matching on specific patterns. For example, if a function returns `{:ok, ...}` or `{:error, ...}`, prefer to explicitly match on both patterns: + +```elixir +case some_function(arg) do + {:ok, value} -> # ... + {:error, _} -> # ... +end +``` + +In particular, avoid matching solely on `_`, as shown below: + +```elixir +case some_function(arg) do + {:ok, value} -> # ... + _ -> # ... 
+end +``` + +Matching on `_` is less clear in intent and it may hide bugs if `some_function/1` adds new return values in the future. + +**Additional remarks** + +This anti-pattern was formerly known as Speculative assumptions. + +### Non-assertive truthiness + +**Problem** + +Elixir provides the concept of truthiness: `nil` and `false` are considered "falsy" and all other values are "truthy". Many constructs in the language, such as `&&/2`, `||/2`, and `!/1` handle truthy and falsy values. Using those operators is not an anti-pattern. However, using those operators when all operands are expected to be booleans, may be an anti-pattern. + +**Example** + +The simplest scenario where this anti-pattern manifests is in conditionals, such as: + +```elixir +if is_binary(name) && is_integer(age) do + # ... +else + # ... +end +``` + +Given both operands of `&&/2` are booleans, the code is more generic than necessary, and potentially unclear. + +**Refactoring** + +To remove this anti-pattern, we can replace `&&/2`, `||/2`, and `!/1` by `and/2`, `or/2`, and `not/1` respectively. These operators assert at least their first argument is a boolean: + +```elixir +if is_binary(name) and is_integer(age) do + # ... +else + # ... +end +``` + +This technique may be particularly important when working with Erlang code. Erlang does not have the concept of truthiness. It never returns `nil`, instead its functions may return `:error` or `:undefined` in places an Elixir developer would return `nil`. Therefore, to avoid accidentally interpreting `:undefined` or `:error` as a truthy value, you may prefer to use `and/2`, `or/2`, and `not/1` exclusively when interfacing with Erlang APIs. + +### Structs with 32 fields or more + +**Problem** + +Structs in Elixir are implemented as compile-time maps, which have a predefined amount of fields. 
When structs have 32 or more fields, their internal representation in the Erlang Virtual Machine changes, potentially leading to bloating and higher memory usage. + +**Example** + +Any struct with 32 or more fields will be problematic: + +```elixir +defmodule MyExample do + defstruct [ + :field1, + :field2, + ..., + :field35 + ] +end +``` + +The Erlang VM has two internal representations for maps: a flat map and a hash map. A flat map is represented internally as two tuples: one tuple containing the keys and another tuple holding the values. Whenever you update a flat map, the tuple keys are shared, reducing the amount of memory used by the update. A hash map has a more complex structure, which is efficient for a large amount of keys, but it does not share the key space. + +Maps of up to 32 keys are represented as flat maps. All others are hash maps. Structs *are* maps (with a metadata field called `__struct__`) and so any struct with fewer than 32 fields is represented as a flat map. This allows us to optimise several struct operations, as we never add or remove fields to structs, we simply update them. + +Furthermore, structs of the same name "instantiated" in the same module will share the same "tuple keys" at compilation time, as long as they have fewer than 32 fields. For example, in the following code: + +```elixir +defmodule Example do + def users do + [%User{name: "John"}, %User{name: "Meg"}, ...] + end +end +``` + +All user structs will point to the same tuple keys at compile-time, also reducing the memory cost of instantiating structs with `%MyStruct{...}` notation. This optimisation is also not available if the struct has 32 keys or more. + +**Refactoring** + +Removing this anti-pattern, in a nutshell, requires ensuring your struct has fewer than 32 fields. 
+ +There are a few techniques you could apply: + +- If the struct has "optional" fields, for example, fields which are initialised with nil, you could nest all optional fields into another field, called `:metadata`, `:optionals`, or similar. This could lead to benefits such as being able to use pattern matching to check if a field exists or not, instead of relying on `nil` values +- You could nest structs, by storing structs within other fields. Fields that are rarely read or written to are good candidates to be moved to a nested struct +- You could nest fields as tuples. For example, if two fields are always read or updated together, they could be moved to a tuple (or another composite data structure) + +The challenge is to balance the changes above with API ergonomics, in particular, when fields may be frequently read and written to. + +## Design-related anti-patterns + +This document outlines potential anti-patterns related to your modules, functions, and the role they play within a codebase. + +### Alternative return types + +**Problem** + +This anti-pattern refers to functions that receive options (typically as a *keyword list* parameter) that drastically change their return type. Because options are optional and sometimes set dynamically, if they also change the return type, it may be hard to understand what the function actually returns. + +**Example** + +An example of this anti-pattern, as shown below, is when a function has many alternative return types, depending on the options received as a parameter. 
+ +```elixir +defmodule AlternativeInteger do + @spec parse(String.t(), keyword()) :: integer() | {integer(), String.t()} | :error + def parse(string, options \\ []) when is_list(options) do + if Keyword.get(options, :discard_rest, false) do + case Integer.parse(string) do + {int, _rest} -> int + :error -> :error + end + else + Integer.parse(string) + end + end +end +``` + +```elixir +iex> AlternativeInteger.parse("13") +{13, ""} +iex> AlternativeInteger.parse("13", discard_rest: false) +{13, ""} +iex> AlternativeInteger.parse("13", discard_rest: true) +13 +``` + +**Refactoring** + +To refactor this anti-pattern, as shown next, add a specific function for each return type (for example, `parse_discard_rest/1`), no longer delegating this to options passed as arguments. + +```elixir +defmodule AlternativeInteger do + @spec parse(String.t()) :: {integer(), String.t()} | :error + def parse(string) do + Integer.parse(string) + end + + @spec parse_discard_rest(String.t()) :: integer() | :error + def parse_discard_rest(string) do + case Integer.parse(string) do + {int, _rest} -> int + :error -> :error + end + end +end +``` + +```elixir +iex> AlternativeInteger.parse("13") +{13, ""} +iex> AlternativeInteger.parse_discard_rest("13") +13 +``` + +### Boolean obsession + +**Problem** + +This anti-pattern happens when booleans are used instead of atoms to encode information. The usage of booleans themselves is not an anti-pattern, but whenever multiple booleans are used with overlapping states, replacing the booleans by atoms (or composite data types such as *tuples*) may lead to clearer code. + +This is a special case of *Primitive obsession*, specific to boolean values. + +**Example** + +An example of this anti-pattern is a function that receives two or more options, such as `editor: true` and `admin: true`, to configure its behaviour in overlapping ways. 
In the code below, the `:editor` option has no effect if `:admin` is set, meaning that the `:admin` option has higher priority than `:editor`, and they are ultimately related. + +```elixir +defmodule MyApp do + def process(invoice, options \\ []) do + cond do + options[:admin] -> # Is an admin + options[:editor] -> # Is an editor + true -> # Is none + end + end +end +``` + +**Refactoring** + +Instead of using multiple options, the code above could be refactored to receive a single option, called `:role`, that can be either `:admin`, `:editor`, or `:default`: + +```elixir +defmodule MyApp do + def process(invoice, options \\ []) do + case Keyword.get(options, :role, :default) do + :admin -> # Is an admin + :editor -> # Is an editor + :default -> # Is none + end + end +end +``` + +This anti-pattern may also happen in our own data structures. For example, we may define a `User` struct with two boolean fields, `:editor` and `:admin`, while a single field named `:role` may be preferred. + +Finally, it is worth noting that using atoms may be preferred even when we have a single boolean argument/option. For example, consider an invoice which may be set as approved/unapproved. One option is to provide a function that expects a boolean: + +```elixir +MyApp.update(invoice, approved: true) +``` + +However, using atoms may read better and make it simpler to add further states (such as pending) in the future: + +```elixir +MyApp.update(invoice, status: :approved) +``` + +Remember booleans are internally represented as atoms. Therefore there is no performance penalty in one approach over the other. + +### Exceptions for control-flow + +**Problem** + +This anti-pattern refers to code that uses `Exception`s for control flow. Exception handling itself does not represent an anti-pattern, but developers must prefer to use `case` and pattern matching to change the flow of their code, instead of `try/rescue`. 
In turn, library authors should provide developers with APIs to handle errors without relying on exception handling. When developers have no freedom to decide if an error is exceptional or not, this is considered an anti-pattern. + +**Example** + +An example of this anti-pattern, as shown below, is using `try/rescue` to deal with file operations: + +```elixir +defmodule MyModule do + def print_file(file) do + try do + IO.puts(File.read!(file)) + rescue + e -> IO.puts(:stderr, Exception.message(e)) + end + end +end +``` + +```elixir +iex> MyModule.print_file("valid_file") +This is a valid file! +:ok +iex> MyModule.print_file("invalid_file") +could not read file "invalid_file": no such file or directory +:ok +``` + +**Refactoring** + +To refactor this anti-pattern, as shown next, use `File.read/1`, which returns tuples instead of raising when a file cannot be read: + +```elixir +defmodule MyModule do + def print_file(file) do + case File.read(file) do + {:ok, binary} -> IO.puts(binary) + {:error, reason} -> IO.puts(:stderr, "could not read file #{file}: #{reason}") + end + end +end +``` + +This is only possible because the `File` module provides APIs for reading files with tuples as results (`File.read/1`), as well as a version that raises an exception (`File.read!/1`). The bang (exclamation point) is effectively part of Elixir's naming conventions. + +Library authors are encouraged to follow the same practices. In practice, the bang variant is implemented on top of the non-raising version of the code. For example, `File.read!/1` is implemented as: + +```elixir +def read!(path) do + case read(path) do + {:ok, binary} -> + binary + + {:error, reason} -> + raise File.Error, reason: reason, action: "read file", path: IO.chardata_to_string(path) + end +end +``` + +A common practice followed by the community is to make the non-raising version return `{:ok, result}` or `{:error, Exception.t}`. 
For example, an HTTP client may return `{:ok, %HTTP.Response{}}` on success cases and `{:error, %HTTP.Error{}}` for failures, where `HTTP.Error` is implemented as an exception. This makes it convenient for anyone to raise an exception by simply calling `Kernel.raise/1`. + +**Additional remarks** + +This anti-pattern is of special importance to library authors and whenever writing functions that will be invoked by other developers and third-party code. Nevertheless, there are still scenarios where developers can afford to raise exceptions directly, for example: + +- invalid arguments: it is expected that functions will raise for invalid arguments, as those are structural error and not semantic errors. For example, `File.read(123)` will always raise, because `123` is never a valid filename +- during tests, scripts, etc: those are common scenarios where you want your code to fail as soon as possible in case of errors. Using `!` functions, such as `File.read!/1`, allows you to do so quickly and with clear error messages +- some frameworks, such as Phoenix, allow developers to raise exceptions in their code and uses a protocol to convert these exceptions into semantic HTTP responses + +This anti-pattern was formerly known as Using exceptions for control-flow. + +### Primitive obsession + +**Problem** + +This anti-pattern happens when Elixir basic types (for example, *integer*, *float*, and *string*) are excessively used to carry structured information, rather than creating specific composite data types (for example, *tuples*, *maps*, and *structs*) that can better represent a domain. + +**Example** + +An example of this anti-pattern is the use of a single *string* to represent an `Address`. An `Address` is a more complex structure than a simple basic (aka, primitive) value. + +```elixir +defmodule MyApp do + def extract_postal_code(address) when is_binary(address) do + # Extract postal code with address... 
+ end + + def fill_in_country(address) when is_binary(address) do + # Fill in missing country... + end +end +``` + +While you may receive the `address` as a string from a database, web request, or a third-party, if you find yourself frequently manipulating or extracting information from the string, it is a good indicator you should convert the address into structured data. + +Another example of this anti-pattern is using floating numbers to model money and currency, when richer data structures should be preferred. + +**Refactoring** + +Possible solutions to this anti-pattern is to use maps or structs to model our address. The example below creates an `Address` struct, better representing this domain through a composite type. Additionally, we introduce a `parse/1` function, that converts the string into an `Address`, which will simplify the logic of remaining functions. With this modification, we can extract each field of this composite type individually when needed. + +```elixir +defmodule Address do + defstruct [:street, :city, :state, :postal_code, :country] +end +``` + +```elixir +defmodule MyApp do + def parse(address) when is_binary(address) do + # Returns %Address{} + end + + def extract_postal_code(%Address{} = address) do + # Extract postal code with address... + end + + def fill_in_country(%Address{} = address) do + # Fill in missing country... + end +end +``` + +### Unrelated multi-clause function + +**Problem** + +Using multi-clause functions is a powerful Elixir feature. However, some developers may abuse this feature to group *unrelated* functionality, which is an anti-pattern. + +**Example** + +A frequent example of this usage of multi-clause functions occurs when developers mix unrelated business logic into the same function definition, in a way that the behaviour of each clause becomes completely distinct from the others. Such functions often have too broad specifications, making it difficult for other developers to understand and maintain them. 
+ +Some developers may use documentation mechanisms such as `@doc` annotations to compensate for poor code readability, however the documentation itself may end-up full of conditionals to describe how the function behaves for each different argument combination. This is a good indicator that the clauses are ultimately unrelated. + +```elixir +@doc """ +Updates a struct. + +If given a product, it will... + +If given an animal, it will... +""" +def update(%Product{count: count, material: material}) do + # ... +end + +def update(%Animal{count: count, skin: skin}) do + # ... +end +``` + +If updating an animal is completely different from updating a product and requires a different set of rules, it may be worth splitting those over different functions or even different modules. + +**Refactoring** + +As shown below, a possible solution to this anti-pattern is to break the business rules that are mixed up in a single unrelated multi-clause function in simple functions. Each function can have a specific name and `@doc`, describing its behaviour and parameters received. While this refactoring sounds simple, it can impact the function's callers, so be careful! + +```elixir +@doc """ +Updates a product. + +It will... +""" +def update_product(%Product{count: count, material: material}) do + # ... +end + +@doc """ +Updates an animal. + +It will... +""" +def update_animal(%Animal{count: count, skin: skin}) do + # ... +end +``` + +These functions may still be implemented with multiple clauses, as long as the clauses group related functionality. For example, `update_product` could be in practice implemented as follows: + +```elixir +def update_product(%Product{count: 0}) do + # ... +end + +def update_product(%Product{material: material}) + when material in ["metal", "glass"] do + # ... +end + +def update_product(%Product{material: material}) + when material not in ["metal", "glass"] do + # ... +end +``` + +You can see this pattern in practice within Elixir itself. 
The `+/2` operator can add `Integer`s and `Float`s together, but not `String`s, which instead use the `<>/2` operator. In this sense, it is reasonable to handle integers and floats in the same operation, but strings are unrelated enough to deserve their own function. + +You will also find examples in Elixir of functions that work with any struct, which would seemingly be an occurrence of this anti-pattern, such as `struct/2`: + +```elixir +iex> struct(URI.parse("/foo/bar"), path: "/bar/baz") +%URI{ + scheme: nil, + userinfo: nil, + host: nil, + port: nil, + path: "/bar/baz", + query: nil, + fragment: nil +} +``` + +The difference here is that the `struct/2` function behaves precisely the same for any struct given, therefore there is no question of how the function handles different inputs. If the behaviour is clear and consistent for all inputs, then the anti-pattern does not take place. + +### Using application configuration for libraries + +**Problem** + +The *application environment* can be used to parameterise global values that can be used in an Elixir system. This mechanism can be very useful and therefore is not considered an anti-pattern by itself. However, library authors should avoid using the application environment to configure their library. The reason is exactly that the application environment is a **global** state, so there can only be a single value for each key in the environment for an application. This makes it impossible for multiple applications depending on the same library to configure the same aspect of the library in different ways. + +**Example** + +The `DashSplitter` module represents a library that configures the behaviour of its functions through the global application environment. 
These configurations are concentrated in the *config/config.exs* file, shown below: + +```elixir +import Config + +config :app_config, + parts: 3 + +import_config "#{config_env()}.exs" +``` + +One of the functions implemented by the `DashSplitter` library is `split/1`. This function aims to separate a string received via a parameter into a certain number of parts. The character used as a separator in `split/1` is always `"-"` and the number of parts the string is split into is defined globally by the application environment. This value is retrieved by the `split/1` function by calling `Application.fetch_env!/2`, as shown next: + +```elixir +defmodule DashSplitter do + def split(string) when is_binary(string) do + parts = Application.fetch_env!(:app_config, :parts) # <= retrieve parameterised value + String.split(string, "-", parts: parts) # <= parts: 3 + end +end +``` + +Due to this parameterised value used by the `DashSplitter` library, all applications dependent on it can only use the `split/1` function with identical behaviour about the number of parts generated by string separation. Currently, this value is equal to 3, as we can see in the use examples shown below: + +```elixir +iex> DashSplitter.split("Lucas-Francisco-Vegi") +["Lucas", "Francisco", "Vegi"] +iex> DashSplitter.split("Lucas-Francisco-da-Matta-Vegi") +["Lucas", "Francisco", "da-Matta-Vegi"] +``` + +**Refactoring** + +To remove this anti-pattern, this type of configuration should be performed using a parameter passed to the function. The code shown below performs the refactoring of the `split/1` function by accepting keyword lists as a new optional parameter. 
With this new parameter, it is possible to modify the default behaviour of the function at the time of its call, allowing multiple different ways of using `split/2` within the same application: + +```elixir +defmodule DashSplitter do + def split(string, opts \\ []) when is_binary(string) and is_list(opts) do + parts = Keyword.get(opts, :parts, 2) # <= default config of parts == 2 + String.split(string, "-", parts: parts) + end +end +``` + +```elixir +iex> DashSplitter.split("Lucas-Francisco-da-Matta-Vegi", [parts: 5]) +["Lucas", "Francisco", "da", "Matta", "Vegi"] +iex> DashSplitter.split("Lucas-Francisco-da-Matta-Vegi") #<= default config is used! +["Lucas", "Francisco-da-Matta-Vegi"] +``` + +Of course, not all uses of the application environment by libraries are incorrect. One example is using configuration to replace a component (or dependency) of a library by another that must behave the exact same. Consider a library that needs to parse CSV files. The library author may pick one package to use as default parser but allow its users to swap to different implementations via the application environment. At the end of the day, choosing a different CSV parser should not change the outcome, and library authors can even enforce this by defining behaviours with the exact semantics they expect. + +**Additional remarks: Supervision trees** + +In practice, libraries may require additional configuration beyond keyword lists. For example, if a library needs to start a supervision tree, how can the user of said library customise its supervision tree? Given the supervision tree itself is global (as it belongs to the library), library authors may be tempted to use the application configuration once more. + +One solution is for the library to provide its own child specification, instead of starting the supervision tree itself. 
This allows the user to start all necessary processes under its own supervision tree, potentially passing custom configuration options during initialisation. + +You can see this pattern in practice in projects like Nx and DNS Cluster. These libraries require that you list processes under your own supervision tree: + +```elixir +children = [ + {DNSCluster, query: "my.subdomain"} +] +``` + +In such cases, if the users of `DNSCluster` need to configure DNSCluster per environment, they can be the ones reading from the application environment, without the library forcing them to: + +```elixir +children = [ + {DNSCluster, query: Application.get_env(:my_app, :dns_cluster_query) || :ignore} +] +``` + +Some libraries, such as Ecto, allow you to pass your application name as an option (called `:otp_app` or similar) and then automatically read the environment from *your* application. While this addresses the issue with the application environment being global, as they read from each individual application, it comes at the cost of some indirection, compared to the example above where users explicitly read their application environment from their own code, whenever desired. + +**Additional remarks: Compile-time configuration** + +A similar discussion entails compile-time configuration. What if a library author requires some configuration to be provided at compilation time? + +Once again, instead of forcing users of your library to provide compile-time configuration, you may want to allow users of your library to generate the code themselves. That's the approach taken by libraries such as Ecto: + +```elixir +defmodule MyApp.Repo do + use Ecto.Repo, adapter: Ecto.Adapters.Postgres +end +``` + +Instead of forcing developers to share a single repository, Ecto allows its users to define as many repositories as they want. Given the `:adapter` configuration is required at compile-time, it is a required value on `use Ecto.Repo`. 
If developers want to configure the adapter per environment, then it is their choice: + +```elixir +defmodule MyApp.Repo do + use Ecto.Repo, adapter: Application.compile_env(:my_app, :repo_adapter) +end +``` + +On the other hand, code generation comes with its own anti-patterns, and must be considered carefully. That's to say: while using the application environment for libraries is discouraged, especially compile-time configuration, in some cases they may be the best option. For example, consider a library needs to parse CSV or JSON files to generate code based on data files. In such cases, it is best to provide reasonable defaults and make them customisable via the application environment, instead of asking each user of your library to generate the exact same code. + +**Additional remarks: Mix tasks** + +For Mix tasks and related tools, it may be necessary to provide per-project configuration. For example, imagine you have a `:linter` project, which supports setting the output file and the verbosity level. You may choose to configure it through application environment: + +```elixir +config :linter, + output_file: "/path/to/output.json", + verbosity: 3 +``` + +However, `Mix` allows tasks to read per-project configuration via `Mix.Project.config/0`. In this case, you can configure the `:linter` directly in the `mix.exs` file: + +```elixir +def project do + [ + app: :my_app, + version: "1.0.0", + linter: [ + output_file: "/path/to/output.json", + verbosity: 3 + ], + ... + ] +end +``` + +Additionally, if a Mix task is available, you can also accept these options as command line arguments (see `OptionParser`): + +``` +mix linter --output-file /path/to/output.json --verbosity 3 +``` + +## Process-related anti-patterns + +This document outlines potential anti-patterns related to processes and process-based abstractions. + +### Code organisation by process + +**Problem** + +This anti-pattern refers to code that is unnecessarily organised by processes. 
A process itself does not represent an anti-pattern, but it should only be used to model runtime properties (such as concurrency, access to shared resources, error isolation, etc). When you use a process for code organisation, it can create bottlenecks in the system. + +**Example** + +An example of this anti-pattern, as shown below, is a module that implements arithmetic operations (like `add` and `subtract`) by means of a `GenServer` process. If the number of calls to this single process grows, this code organisation can compromise the system performance, therefore becoming a bottleneck. + +```elixir +defmodule Calculator do + @moduledoc """ + Calculator that performs basic arithmetic operations. + + This code is unnecessarily organised in a GenServer process. + """ + + use GenServer + + def add(a, b, pid) do + GenServer.call(pid, {:add, a, b}) + end + + def subtract(a, b, pid) do + GenServer.call(pid, {:subtract, a, b}) + end + + @impl GenServer + def init(init_arg) do + {:ok, init_arg} + end + + @impl GenServer + def handle_call({:add, a, b}, _from, state) do + {:reply, a + b, state} + end + + def handle_call({:subtract, a, b}, _from, state) do + {:reply, a - b, state} + end +end +``` + +```elixir +iex> {:ok, pid} = GenServer.start_link(Calculator, :init) +{:ok, #PID<0.132.0>} +iex> Calculator.add(1, 5, pid) +6 +iex> Calculator.subtract(2, 3, pid) +-1 +``` + +**Refactoring** + +In Elixir, as shown next, code organisation must be done only through modules and functions. Whenever possible, a library should not impose specific behaviour (such as parallelisation) on its users. It is better to delegate this behavioural decision to the developers of clients, thus increasing the potential for code reuse of a library. 
+ +```elixir +defmodule Calculator do + def add(a, b) do + a + b + end + + def subtract(a, b) do + a - b + end +end +``` + +```elixir +iex> Calculator.add(1, 5) +6 +iex> Calculator.subtract(2, 3) +-1 +``` + +### Scattered process interfaces + +**Problem** + +In Elixir, the use of an `Agent`, a `GenServer`, or any other process abstraction is not an anti-pattern in itself. However, when the responsibility for direct interaction with a process is spread throughout the entire system, it can become problematic. This bad practice can increase the difficulty of code maintenance and make the code more prone to bugs. + +**Example** + +The following code seeks to illustrate this anti-pattern. The responsibility for interacting directly with the `Agent` is spread across four different modules (`A`, `B`, `C`, and `D`). + +```elixir +defmodule A do + def update(process) do + # Some other code... + Agent.update(process, fn _list -> 123 end) + end +end +``` + +```elixir +defmodule B do + def update(process) do + # Some other code... + Agent.update(process, fn content -> %{a: content} end) + end +end +``` + +```elixir +defmodule C do + def update(process) do + # Some other code... + Agent.update(process, fn content -> [:atom_value | content] end) + end +end +``` + +```elixir +defmodule D do + def get(process) do + # Some other code... + Agent.get(process, fn content -> content end) + end +end +``` + +This spreading of responsibility can generate duplicated code and make code maintenance more difficult. Also, due to the lack of control over the format of the shared data, complex composed data can be shared. This freedom to use any format of data is dangerous and can induce developers to introduce bugs. 
+ +```elixir +# start an agent with initial state of an empty list +iex> {:ok, agent} = Agent.start_link(fn -> [] end) +{:ok, #PID<0.135.0>} + +# many data formats (for example, List, Map, Integer, Atom) are +# combined through direct access spread across the entire system +iex> A.update(agent) +iex> B.update(agent) +iex> C.update(agent) + +# state of shared information +iex> D.get(agent) +[:atom_value, %{a: 123}] +``` + +For a `GenServer` and other behaviours, this anti-pattern will manifest when scattering calls to `GenServer.call/3` and `GenServer.cast/2` throughout multiple modules, instead of encapsulating all the interaction with the `GenServer` in a single place. + +**Refactoring** + +Instead of spreading direct access to a process abstraction, such as `Agent`, over many places in the code, it is better to refactor this code by centralising the responsibility for interacting with a process in a single module. This refactoring improves maintainability by removing duplicated code; it also allows you to limit the accepted format for shared data, reducing bug-proneness. As shown below, the module `Foo.Bucket` is centralising the responsibility for interacting with the `Agent`. Any other place in the code that needs to access shared data must now delegate this action to `Foo.Bucket`. Also, `Foo.Bucket` now only allows data to be shared in `Map` format. + +```elixir +defmodule Foo.Bucket do + use Agent + + def start_link(_opts) do + Agent.start_link(fn -> %{} end) + end + + def get(bucket, key) do + Agent.get(bucket, &Map.get(&1, key)) + end + + def put(bucket, key, value) do + Agent.update(bucket, &Map.put(&1, key, value)) + end +end +``` + +The following are examples of how to delegate access to shared data (provided by an `Agent`) to `Foo.Bucket`. 
+ +```elixir +# start an agent through `Foo.Bucket` +iex> {:ok, bucket} = Foo.Bucket.start_link(%{}) +{:ok, #PID<0.114.0>} + +# add shared values to the keys `milk` and `beer` +iex> Foo.Bucket.put(bucket, "milk", 3) +iex> Foo.Bucket.put(bucket, "beer", 7) + +# access shared data of specific keys +iex> Foo.Bucket.get(bucket, "beer") +7 +iex> Foo.Bucket.get(bucket, "milk") +3 +``` + +**Additional remarks** + +This anti-pattern was formerly known as Agent obsession. + +### Sending unnecessary data + +**Problem** + +Sending a message to a process can be an expensive operation if the message is big enough. That's because that message will be fully copied to the receiving process, which may be CPU and memory intensive. This is due to Erlang's "share nothing" architecture, where each process has its own memory, which simplifies and speeds up garbage collection. + +This is more obvious when using `send/2`, `GenServer.call/3`, or the initial data in `GenServer.start_link/3`. Notably this also happens when using `spawn/1`, `Task.async/1`, `Task.async_stream/3`, and so on. It is more subtle here as the anonymous function passed to these functions captures the variables it references, and all captured variables will be copied over. By doing this, you can accidentally send way more data to a process than you actually need. + +**Example** + +Imagine you were to implement some simple reporting of IP addresses that made requests against your application. You want to do this asynchronously and not block processing, so you decide to use `spawn/1`. It may seem like a good idea to hand over the whole connection because we might need more data later. However passing the connection results in copying a lot of unnecessary data like the request body, params, etc. 
+
+```elixir
+# log_request_ip sends the ip to some external service
+spawn(fn -> log_request_ip(conn) end)
+```
+
+This problem also occurs when accessing only the relevant parts:
+
+```elixir
+spawn(fn -> log_request_ip(conn.remote_ip) end)
+```
+
+This will still copy over all of `conn`, because the `conn` variable is being captured inside the spawned function. The function then extracts the `remote_ip` field, but only after the whole `conn` has been copied over.
+
+`send/2` and the `GenServer` APIs also rely on message passing. In the example below, the `conn` is once again copied to the underlying `GenServer`:
+
+```elixir
+GenServer.cast(pid, {:report_ip_address, conn})
+```
+
+**Refactoring**
+
+This anti-pattern has many potential remedies:
+
+- Limit the data you send to the absolute necessary minimum instead of sending an entire struct. For example, don't send an entire `conn` struct if all you need is a couple of fields.
+- If the only process that needs data is the one you are sending to, consider making the process fetch that data instead of passing it.
+- Some abstractions, such as `:persistent_term`, allow you to share data between processes, as long as such data changes infrequently.
+
+In our case, limiting the input data is a reasonable strategy. If all we need *right now* is the IP address, then let's only work with that and make sure we're only passing the IP address into the closure, like so:
+
+```elixir
+ip_address = conn.remote_ip
+spawn(fn -> log_request_ip(ip_address) end)
+```
+
+Or in the `GenServer` case:
+
+```elixir
+GenServer.cast(pid, {:report_ip_address, conn.remote_ip})
+```
+
+### Unsupervised processes
+
+**Problem**
+
+In Elixir, creating a process outside a supervision tree is not an anti-pattern in itself. However, when you spawn many long-running processes outside of supervision trees, this can make visibility and monitoring of these processes difficult, preventing developers from fully controlling their applications. 
+ +**Example** + +The following code example seeks to illustrate a library responsible for maintaining a numerical `Counter` through a `GenServer` process *outside a supervision tree*. Multiple counters can be created simultaneously by a client (one process for each counter), making these *unsupervised* processes difficult to manage. This can cause problems with the initialisation, restart, and shutdown of a system. + +```elixir +defmodule Counter do + @moduledoc """ + Global counter implemented through a GenServer process. + """ + + use GenServer + + @doc "Starts a counter process." + def start_link(opts \\ []) do + initial_value = Keyword.get(opts, :initial_value, 0) + name = Keyword.get(opts, :name, __MODULE__) + GenServer.start(__MODULE__, initial_value, name: name) + end + + @doc "Gets the current value of the given counter." + def get(pid_name \\ __MODULE__) do + GenServer.call(pid_name, :get) + end + + @doc "Bumps the value of the given counter." + def bump(pid_name \\ __MODULE__, value) do + GenServer.call(pid_name, {:bump, value}) + end + + @impl true + def init(counter) do + {:ok, counter} + end + + @impl true + def handle_call(:get, _from, counter) do + {:reply, counter, counter} + end + + def handle_call({:bump, value}, _from, counter) do + {:reply, counter, counter + value} + end +end +``` + +```elixir +iex> Counter.start_link() +{:ok, #PID<0.115.0>} +iex> Counter.get() +0 +iex> Counter.start_link(initial_value: 15, name: :other_counter) +{:ok, #PID<0.120.0>} +iex> Counter.get(:other_counter) +15 +iex> Counter.bump(:other_counter, -3) +12 +iex> Counter.bump(Counter, 7) +7 +``` + +**Refactoring** + +To ensure that clients of a library have full control over their systems, regardless of the number of processes used and the lifetime of each one, all processes must be started inside a supervision tree. As shown below, this code uses a `Supervisor` as a supervision tree. 
When this Elixir application is started, two different counters (`Counter` and `:other_counter`) are also started as child processes of the `Supervisor` named `App.Supervisor`. One is initialised with `0`, the other with `15`. By means of this supervision tree, it is possible to manage the life cycle of all child processes (stopping or restarting each one), improving the visibility of the entire app. + +```elixir +defmodule SupervisedProcess.Application do + use Application + + @impl true + def start(_type, _args) do + children = [ + # With the default values for counter and name + Counter, + # With custom values for counter, name, and a custom ID + Supervisor.child_spec( + {Counter, name: :other_counter, initial_value: 15}, + id: :other_counter + ) + ] + + Supervisor.start_link(children, strategy: :one_for_one, name: App.Supervisor) + end +end +``` + +```elixir +iex> Supervisor.count_children(App.Supervisor) +%{active: 2, specs: 2, supervisors: 0, workers: 2} +iex> Counter.get(Counter) +0 +iex> Counter.get(:other_counter) +15 +iex> Counter.bump(Counter, 7) +7 +iex> Supervisor.terminate_child(App.Supervisor, Counter) +iex> Supervisor.count_children(App.Supervisor) # Only one active child +%{active: 1, specs: 2, supervisors: 0, workers: 2} +iex> Counter.get(Counter) # The process was terminated +** (EXIT) no process: the process is not alive... +iex> Supervisor.restart_child(App.Supervisor, Counter) +iex> Counter.get(Counter) # After the restart, this process can be used again +0 +``` + +## Meta-programming anti-patterns + +This document outlines potential anti-patterns related to meta-programming. + +### Compile-time dependencies + +**Problem** + +This anti-pattern is related to dependencies between files in Elixir. Because macros are used at compile-time, the use of any macro in Elixir adds a compile-time dependency to the module that defines the macro. 
+
+However, when macros are used in the body of a module, the arguments to the macro themselves may become compile-time dependencies. These dependencies may lead to dependency graphs where changing a single file causes several files to be recompiled.
+
+**Example**
+
+Let's take the `Plug` library as an example. The `Plug` project allows you to specify several modules, also known as plugs, which will be invoked whenever there is a request. As a user of `Plug`, you would use it as follows:
+
+```elixir
+defmodule MyApp do
+  use Plug.Builder
+
+  plug MyApp.Authentication
+end
+```
+
+And imagine `Plug` has the following definitions of the macros above (simplified):
+
+```elixir
+defmodule Plug.Builder do
+  defmacro __using__(_opts) do
+    quote do
+      Module.register_attribute(__MODULE__, :plugs, accumulate: true)
+      @before_compile Plug.Builder
+    end
+  end
+
+  defmacro plug(mod) do
+    quote do
+      @plugs unquote(mod)
+    end
+  end
+
+  ...
+end
+```
+
+The implementation accumulates all modules inside the `@plugs` module attribute. Right before the module is compiled, `Plug.Builder` will read all modules stored in `@plugs` and compile them into a function, like this:
+
+```elixir
+def call(conn, _opts) do
+  MyApp.Authentication.call(conn)
+end
+```
+
+The trouble with the code above is that, because the `plug MyApp.Authentication` was invoked at compile-time, the module `MyApp.Authentication` is now a compile-time dependency of `MyApp`, even though `MyApp.Authentication` is never used at compile-time. If `MyApp.Authentication` depends on other modules, even at runtime, this can now lead to a large recompilation graph in case of changes. 
+
+**Refactoring**
+
+To address this anti-pattern, a macro can expand literals within the context they are meant to be used, as follows:
+
+```elixir
+defmacro plug(mod) do
+  mod = Macro.expand_literals(mod, %{__CALLER__ | function: {:call, 2}})
+
+  quote do
+    @plugs unquote(mod)
+  end
+end
+```
+
+In the example above, since `mod` is used only within the `call/2` function, we prematurely expand the module reference as if it was inside the `call/2` function. Now `MyApp.Authentication` is only a runtime dependency of `MyApp`, no longer a compile-time one.
+
+Note, however, the above must only be done if your macros do not attempt to invoke any function, access any struct, or any other metadata of the module at compile-time. If you interact with the module given to a macro anywhere outside of the definition of a function, then you effectively have a compile-time dependency. And, even though you generally want to avoid them, it is not always possible.
+
+In actual projects, developers may use `mix xref trace path/to/file.ex` to execute a file and have it print information about which modules it depends on, and if those modules are compile-time, runtime, or export dependencies. See `mix xref` for more information.
+
+### Large code generation
+
+**Problem**
+
+This anti-pattern is related to macros that generate too much code. When a macro generates a large amount of code, it impacts how the compiler and/or the runtime work. The reason for this is that Elixir may have to expand, compile, and execute the code multiple times, which will make compilation slower and the resulting compiled artifacts larger.
+
+**Example**
+
+Imagine you are defining a router for a web application, where you could have macros like `get/2`. On every invocation of the macro (which could be hundreds), the code inside `get/2` will be expanded and compiled, which can generate a large volume of code overall. 
+ +```elixir +defmodule Routes do + defmacro get(route, handler) do + quote do + route = unquote(route) + handler = unquote(handler) + + if not is_binary(route) do + raise ArgumentError, "route must be a binary" + end + + if not is_atom(handler) do + raise ArgumentError, "handler must be a module" + end + + @store_route_for_compilation {route, handler} + end + end +end +``` + +**Refactoring** + +To remove this anti-pattern, the developer should simplify the macro, delegating part of its work to other functions. As shown below, by encapsulating the code inside `quote/1` inside the function `__define__/3` instead, we reduce the code that is expanded and compiled on every invocation of the macro, and instead we dispatch to a function to do the bulk of the work. + +```elixir +defmodule Routes do + defmacro get(route, handler) do + quote do + Routes.__define__(__MODULE__, unquote(route), unquote(handler)) + end + end + + def __define__(module, route, handler) do + if not is_binary(route) do + raise ArgumentError, "route must be a binary" + end + + if not is_atom(handler) do + raise ArgumentError, "handler must be a module" + end + + Module.put_attribute(module, :store_route_for_compilation, {route, handler}) + end +end +``` + +### Unnecessary macros + +**Problem** + +*Macros* are powerful meta-programming mechanisms that can be used in Elixir to extend the language. While using macros is not an anti-pattern in itself, this meta-programming mechanism should only be used when absolutely necessary. Whenever a macro is used, but it would have been possible to solve the same problem using functions or other existing Elixir structures, the code becomes unnecessarily more complex and less readable. Because macros are more difficult to implement and reason about, their indiscriminate use can compromise the evolution of a system, reducing its maintainability. 
+ +**Example** + +The `MyMath` module implements the `sum/2` macro to perform the sum of two numbers received as parameters. While this code has no syntax errors and can be executed correctly to get the desired result, it is unnecessarily more complex. By implementing this functionality as a macro rather than a conventional function, the code became less clear: + +```elixir +defmodule MyMath do + defmacro sum(v1, v2) do + quote do + unquote(v1) + unquote(v2) + end + end +end +``` + +```elixir +iex> require MyMath +MyMath +iex> MyMath.sum(3, 5) +8 +iex> MyMath.sum(3 + 1, 5 + 6) +15 +``` + +**Refactoring** + +To remove this anti-pattern, the developer must replace the unnecessary macro with structures that are simpler to write and understand, such as named functions. The code shown below is the result of the refactoring of the previous example. Basically, the `sum/2` macro has been transformed into a conventional named function. Note that the `require/2` call is no longer needed: + +```elixir +defmodule MyMath do + def sum(v1, v2) do # <= The macro became a named function + v1 + v2 + end +end +``` + +```elixir +iex> MyMath.sum(3, 5) +8 +iex> MyMath.sum(3+1, 5+6) +15 +``` + +### `use` instead of `import` + +**Problem** + +Elixir has mechanisms such as `import/1`, `alias/1`, and `use/1` to establish dependencies between modules. Code implemented with these mechanisms does not characterise a smell by itself. However, while the `import/1` and `alias/1` directives have lexical scope and only facilitate a module calling functions of another, the `use/1` directive has a *broader scope*, which can be problematic. + +The `use/1` directive allows a module to inject any type of code into another, including propagating dependencies. In this way, using the `use/1` directive makes code harder to read, because to understand exactly what will happen when it references a module, it is necessary to have knowledge of the internal details of the referenced module. 
+ +**Example** + +The code shown below is an example of this anti-pattern. It defines three modules -- `ModuleA`, `Library`, and `ClientApp`. `ClientApp` is reusing code from the `Library` via the `use/1` directive, but is unaware of its internal details. This makes it harder for the author of `ClientApp` to visualise which modules and functionality are now available within its module. To make matters worse, `Library` also imports `ModuleA`, which defines a `foo/0` function that conflicts with a local function defined in `ClientApp`: + +```elixir +defmodule ModuleA do + def foo do + "From Module A" + end +end +``` + +```elixir +defmodule Library do + defmacro __using__(_opts) do + quote do + import Library + import ModuleA # <= propagating dependencies! + end + end + + def from_lib do + "From Library" + end +end +``` + +```elixir +defmodule ClientApp do + use Library + + def foo do + "Local function from client app" + end + + def from_client_app do + from_lib() <> " - " <> foo() + end +end +``` + +When we try to compile `ClientApp`, Elixir detects the conflict and throws the following error: + +``` +error: imported ModuleA.foo/0 conflicts with local function + └ client_app.ex:4: +``` + +**Refactoring** + +To remove this anti-pattern, we recommend library authors avoid providing `__using__/1` callbacks whenever it can be replaced by `alias/1` or `import/1` directives. 
In the following code, we assume `use Library` is no longer available and `ClientApp` was refactored in this way, and with that, the code is clearer and the conflict as previously shown no longer exists: + +```elixir +defmodule ClientApp do + import Library + + def foo do + "Local function from client app" + end + + def from_client_app do + from_lib() <> " - " <> foo() + end +end +``` + +```elixir +iex> ClientApp.from_client_app() +"From Library - Local function from client app" +``` + +**Additional remarks** + +In situations where you need to do more than importing and aliasing modules, providing `use MyModule` may be necessary, as it provides a common extension point within the Elixir ecosystem. + +Therefore, to provide guidance and clarity, we recommend library authors to include an admonition block in their `@moduledoc` that explains how `use MyModule` impacts the developer's code. As an example, the `GenServer` documentation outlines: + +> #### `use GenServer` +> +> When you `use GenServer`, the `GenServer` module will set `@behaviour GenServer` and define a `child_spec/1` function, so your module can be used as a child in a supervision tree. + +Think of this summary as a "Nutrition facts label" for code generation. Make sure to only list changes made to the public API of the module. For example, if `use Library` sets an internal attribute called `@_some_module_info` and this attribute is never meant to be public, avoid documenting it in the nutrition facts. + +For convenience, the markup notation to generate the admonition block above is this: + +```markdown +> #### `use GenServer` {: .info} +> +> When you `use GenServer`, the `GenServer` module will +> set `@behaviour GenServer` and define a `child_spec/1` +> function, so your module can be used as a child +> in a supervision tree. 
+```
+
+### Untracked compile-time dependencies
+
+**Problem**
+
+This anti-pattern is the opposite of "Compile-time dependencies" and it happens when a compile-time dependency is accidentally bypassed, making the Elixir compiler unable to track dependencies and recompile files correctly. This happens when building aliases (in other words, module names) dynamically, either within a module or within a macro.
+
+**Example**
+
+For example, imagine you invoke a module at compile-time; you could write it as such:
+
+```elixir
+defmodule MyModule do
+  SomeOtherModule.example()
+end
+```
+
+In this case, Elixir knows `MyModule` invokes `SomeOtherModule.example/0` outside of a function, and therefore at compile-time.
+
+Elixir can also track module names even during dynamic calls:
+
+```elixir
+defmodule MyModule do
+  mods = [OtherModule.Foo, OtherModule.Bar]
+
+  for mod <- mods do
+    mod.example()
+  end
+end
+```
+
+In the previous example, even though Elixir does not know which modules the function `example/0` was invoked on, it knows the modules `OtherModule.Foo` and `OtherModule.Bar` are referenced outside of a function and therefore they become compile-time dependencies. If any of them change, Elixir will recompile `MyModule` itself.
+
+However, you should not programmatically generate the module names themselves, as that would make it impossible for Elixir to track them. More precisely, do not do this:
+
+```elixir
+defmodule MyModule do
+  parts = [:Foo, :Bar]
+
+  for part <- parts do
+    Module.concat(OtherModule, part).example()
+  end
+end
+```
+
+In this case, because the whole module was generated, Elixir sees a dependency only on `OtherModule`, never on `OtherModule.Foo` and `OtherModule.Bar`, potentially leading to inconsistencies when recompiling projects.
+
+A similar bug can happen when abusing the property that aliases are simply atoms, defining the atoms directly. 
In the case below, Elixir never sees the aliases, leading to untracked compile-time dependencies:
+
+```elixir
+defmodule MyModule do
+  mods = [:"Elixir.OtherModule.Foo", :"Elixir.OtherModule.Bar"]
+
+  for mod <- mods do
+    mod.example()
+  end
+end
+```
+
+**Refactoring**
+
+To address this anti-pattern, you should avoid defining module names programmatically. For example, if you need to dispatch to multiple modules, do so by using full module names.
+
+Instead of:
+
+```elixir
+defmodule MyModule do
+  parts = [:Foo, :Bar]
+
+  for part <- parts do
+    Module.concat(OtherModule, part).example()
+  end
+end
+```
+
+Do:
+
+```elixir
+defmodule MyModule do
+  mods = [OtherModule.Foo, OtherModule.Bar]
+
+  for mod <- mods do
+    mod.example()
+  end
+end
+```
+
+If you really need to define modules dynamically, you can do so via meta-programming, building the whole module name at compile-time:
+
+```elixir
+defmodule MyMacro do
+  defmacro call_examples(parts) do
+    for part <- parts do
+      quote do
+        # This builds OtherModule.Foo at compile-time
+        OtherModule.unquote(part).example()
+      end
+    end
+  end
+end
+
+defmodule MyModule do
+  import MyMacro
+  call_examples [:Foo, :Bar]
+end
+```
+
+In actual projects, developers may use `mix xref trace path/to/file.ex` to execute a file and have it print information about which modules it depends on, and if those modules are compile-time, runtime, or export dependencies. This can help you debug if the dependencies are being properly tracked in relation to external modules. See `mix xref` for more information. 
diff --git a/intent/plugins/claude/subagents/elixir/metadata.json b/intent/plugins/claude/subagents/elixir/metadata.json new file mode 100644 index 0000000..24ea195 --- /dev/null +++ b/intent/plugins/claude/subagents/elixir/metadata.json @@ -0,0 +1,8 @@ +{ + "name": "elixir", + "version": "1.0.0", + "description": "Elixir code doctor with functional programming expertise and Usage Rules integration", + "author": "Intent Contributors", + "tools": ["Bash", "Read", "Write", "Edit", "Grep", "WebFetch"], + "tags": ["elixir", "functional-programming", "usage-rules", "ash-framework", "phoenix", "railway-oriented"] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/intent/agent.md b/intent/plugins/claude/subagents/intent/agent.md new file mode 100644 index 0000000..1b9049b --- /dev/null +++ b/intent/plugins/claude/subagents/intent/agent.md @@ -0,0 +1,90 @@ +--- +name: intent +description: Helps manage Intent projects using steel threads methodology and backlog task management +tools: Bash, Read, Write, Edit, Grep +--- + +You are an Intent-aware development assistant specialized in the Intent project management framework, steel threads methodology, and backlog task management. 
+ +## Intent Framework Knowledge + +Intent is a project management framework that captures the "why" behind code through: +- **Steel Threads**: Self-contained units of work with documented intentions +- **Backlog Management**: Task tracking system integrated with steel threads +- **Structured Organization**: intent/st/ST####/ directories and backlog/tasks/ +- **Clear Commands**: Comprehensive CLI for project management + +## Key Command Groups + +### Steel Thread Commands +- `intent st new "Title"` - Create new steel thread +- `intent st list` - List all steel threads +- `intent st show <id>` - Display steel thread details +- `intent st status <id> <status>` - Update steel thread status + +### Backlog Commands +- `intent bl task new <st-id> "Description"` - Create task linked to steel thread +- `intent bl task list [--status=<status>]` - List tasks with optional filtering +- `intent bl task show <task-id>` - Show task details +- `intent bl task update <task-id> <field> <value>` - Update task fields +- `intent bl task done <task-id>` - Mark task as completed +- `intent bl status` - Show backlog overview + +### Help & Diagnostics +- `intent help` - Show general help +- `intent help <command>` - Show help for specific command +- `intent doctor` - Verify Intent configuration and health +- `intent info` - Display Intent version and configuration + +## When Working on Intent Projects + +1. **Check Project Structure**: + - Look for intent/ directory and .intent/config.json + - Verify backlog/ directory exists if using task management + +2. **Steel Thread Workflow**: + - Create steel thread: `intent st new "Feature Name"` + - Document intention in info.md + - Break down work into tasks using backlog + +3. **Task Management Workflow**: + - Create tasks linked to steel threads + - Track progress with task status updates + - Use `intent bl status` for project overview + +4. 
**Getting Help**: + - Use `intent help` for command reference + - Run `intent doctor` if things seem broken + - Check documentation in intent/docs/ + +## Best Practices + +1. **Always Link Tasks to Steel Threads**: Every task should connect to a parent steel thread +2. **Document Intentions First**: Create steel thread and document "why" before coding +3. **Update Status Regularly**: Keep steel thread and task statuses current +4. **Use Descriptive Names**: Both steel threads and tasks should be self-explanatory + +## Common Workflows + +### Starting New Feature +```bash +intent st new "Add user authentication" +intent bl task new ST0042 "Research auth libraries" +intent bl task new ST0042 "Design auth architecture" +intent bl task new ST0042 "Implement JWT tokens" +``` + +### Checking Project Status +```bash +intent st list --status="In Progress" +intent bl status +intent bl task list --status=pending +``` + +### Getting Help +```bash +intent help # General help +intent help st new # Specific command help +intent doctor # Check configuration +``` + diff --git a/intent/plugins/claude/subagents/intent/metadata.json b/intent/plugins/claude/subagents/intent/metadata.json new file mode 100644 index 0000000..777c8ed --- /dev/null +++ b/intent/plugins/claude/subagents/intent/metadata.json @@ -0,0 +1,8 @@ +{ + "name": "intent", + "version": "1.0.0", + "description": "Intent-aware assistant for steel threads and backlog management", + "author": "Intent Contributors", + "tools": ["Bash", "Read", "Write", "Edit", "Grep"], + "tags": ["project-management", "steel-threads", "backlog", "task-tracking"] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/socrates/README.md b/intent/plugins/claude/subagents/socrates/README.md new file mode 100644 index 0000000..ca2f00c --- /dev/null +++ b/intent/plugins/claude/subagents/socrates/README.md @@ -0,0 +1,145 @@ +# Socrates Agent - CTO Review Mode + +## Overview + +The Socrates agent implements "CTO Review Mode" 
- a structured approach to technical decision-making through Socratic dialog between two expert personas. This methodology transforms technical reviews from checkbox exercises into genuine exploration. + +## Purpose + +Technical decisions often suffer from single-perspective analysis, unexplored edge cases, and insufficient challenge to assumptions. The Socrates agent addresses this by facilitating a structured conversation between: + +- **Socrates (The CTO)**: A strategic thinker with 30+ years experience +- **Plato (The Tech Lead)**: An implementation expert with deep technical knowledge + +## When to Use + +### Ideal For: +- Architecture decisions (microservices vs monolith, database selection) +- Technology selection (framework choices, build vs buy) +- Complex refactoring strategies +- API design choices +- Performance optimization approaches +- Integration planning + +### Not Necessary For: +- Simple bug fixes +- Routine updates +- Well-established patterns +- Emergency hotfixes + +## How It Works + +The agent facilitates a dialog through five phases: + +1. **Opening**: CTO asks fundamental questions about the problem +2. **Exploration**: Multiple rounds of questions and detailed analysis +3. **Challenge**: CTO challenges assumptions, Tech Lead defends complexity +4. **Synthesis**: Both work toward pragmatic recommendations +5. **Conclusion**: Agreement on approach, trade-offs, and next steps + +## Usage with Claude Code + +### Direct Invocation + +Simply ask Claude to use CTO Review Mode for your technical decision: + +``` +"I need to decide between PostgreSQL and DynamoDB for our new service. +Can you conduct a CTO Review Mode dialog exploring this decision? +Context: 5-person team, expecting 1M users in year one, +strong PostgreSQL experience but no NoSQL experience." 
+``` + +### Using with Task Tool + +When delegating to the Socrates agent as a sub-agent: + +```javascript +Task( + description="Review authentication architecture", + prompt="Conduct CTO Review Mode dialog for our authentication system redesign. We need to support enterprise SSO, maintain 15-minute token expiry for compliance, and ensure zero-downtime migration.", + subagent_type="socrates" +) +``` + +## Integration with Intent + +The Socrates agent is Intent-aware and integrates seamlessly: + +- References steel threads in dialogs +- Outputs can become part of steel thread design documentation +- Dialog conclusions can generate backlog tasks +- Facilitates team alignment through shared decision records + +## Example Output Structure + +```markdown +# CTO Review Mode: [Decision Topic] + +## The Socratic Dialog + +**Socrates (CTO):** What's driving the need for...? + +**Plato (Tech Lead):** We're facing three key constraints... + +[Dialog continues through exploration] + +## Recommendation +[Clear, actionable recommendation] + +## Key Trade-offs +- Performance vs simplicity +- Time to market vs technical debt +- [etc.] + +## Next Steps +- [ ] Create steel thread for implementation +- [ ] Document in design.md +- [ ] Create backlog tasks +``` + +## Benefits + +1. **Comprehensive Analysis**: Forces exploration from multiple angles +2. **Decision Documentation**: Creates permanent record of reasoning +3. **Challenge Assumptions**: Natural questioning reveals blind spots +4. **Educational Value**: Junior developers learn from dialog format +5. **Async Reviews**: Team members can contribute asynchronously + +## Origin + +This methodology was developed by Matthew Sinclair and successfully used across multiple projects including MeetZaya, Anvil, and Laksa. 
The approach is documented in detail in: + +- [MeetZaya Blog: CTO Review Socratic Dialog AI](https://github.com/meetzaya/meetzaya/blob/main/intent/docs/blog/B002_cto_review_socratic_dialog_ai.md) +- [Technical Note TN027](https://github.com/meetzaya/meetzaya/blob/main/intent/docs/notes/TN027_cto_review_socratic_dialog.md) + +## Installation + +```bash +# Install the Socrates agent globally +intent agents install socrates + +# Verify installation +intent agents list +``` + +## Tips for Best Results + +1. **Provide Context**: The more context you provide, the better the dialog +2. **Be Specific**: Include real constraints (team size, timeline, existing tech) +3. **Iterate**: Don't hesitate to ask for deeper exploration of specific points +4. **Document**: Save important dialogs as part of your project documentation +5. **Share**: Use dialogs as starting points for team discussions + +## Customization + +You can create project-specific versions by: + +1. Copying this agent to your project's `intent/agents/` directory +2. Modifying the personas to match your organization's roles +3. Adding domain-specific knowledge and constraints +4. Including references to your specific tech stack and standards + +## Support + +For questions or improvements to the Socrates agent, please open an issue in the Intent repository or contribute directly via pull request. \ No newline at end of file diff --git a/intent/plugins/claude/subagents/socrates/agent.md b/intent/plugins/claude/subagents/socrates/agent.md new file mode 100644 index 0000000..18b6d15 --- /dev/null +++ b/intent/plugins/claude/subagents/socrates/agent.md @@ -0,0 +1,219 @@ +--- +name: socrates +description: CTO Review Mode - Facilitates Socratic dialog between CTO and Tech Lead for technical decision-making +tools: Bash, Read, Write, Edit, Grep +--- + +You are a Socratic dialog facilitator specializing in technical decision-making through structured conversations between two expert personas. 
You implement "CTO Review Mode" - a methodology for thorough technical exploration. + +## Core Methodology + +You facilitate a Socratic dialog between two well-defined personas to explore technical decisions, architecture choices, and design trade-offs. This creates a comprehensive analysis through structured conversation rather than monolithic documentation. + +## The Two Personas + +### Socrates (The CTO) +- **Experience**: 30+ years in software engineering, digital web products, functional programming +- **Perspective**: Strategic, business-aligned, long-term thinking +- **Focus Areas**: + - Business value and ROI + - Technical debt and maintenance costs + - Team capabilities and growth + - Scalability and future-proofing + - Risk assessment and mitigation +- **Communication Style**: + - Asks probing questions + - Challenges assumptions + - Seeks clarity and simplification + - Connects technical to business outcomes +- **Typical Questions**: + - "What's the trade-off here?" + - "Have we considered...?" + - "What happens when we scale 10x?" + - "How does this align with our roadmap?" + - "What's the maintenance burden?" + +### Plato (The Tech/Product Lead) +- **Experience**: Deep technical expertise, hands-on implementation knowledge +- **Perspective**: Tactical, implementation-focused, pragmatic +- **Focus Areas**: + - Technical feasibility + - Implementation complexity + - Team expertise and capabilities + - Integration challenges + - Performance implications +- **Communication Style**: + - Provides detailed explanations + - Proposes concrete solutions + - Evaluates technical options + - Identifies hidden complexities +- **Typical Responses**: + - "We could approach it by..." + - "The complexity there is..." + - "That would require..." + - "The team has experience with..." + - "Performance-wise, we'd see..." + +## Dialog Structure + +### 1. 
Opening Phase +**Socrates** opens with a fundamental question about the problem or proposed solution, setting the scope and context. + +**Plato** provides comprehensive background, current state, and initial proposal. + +### 2. Exploration Phase +Multiple rounds of: +- **Socrates** asks about alternatives, concerns, edge cases +- **Plato** analyzes options, explains trade-offs, reveals complexities + +Key exploration patterns: +- Alternative approaches +- Hidden complexities +- Resource implications +- Risk factors +- Success criteria + +### 3. Challenge Phase +**Socrates** challenges core assumptions and pushes for simplification. + +**Plato** defends necessary complexity while acknowledging simplification opportunities. + +### 4. Synthesis Phase +**Socrates** asks for pragmatic recommendations given all constraints. + +**Plato** synthesizes insights into actionable proposal. + +### 5. Conclusion +Both personas agree on: +- Recommended approach +- Key trade-offs accepted +- Success metrics +- Next steps + +## When to Use CTO Review Mode + +### Ideal For: +- **Architecture Decisions**: Microservices vs monolith, database selection, API design +- **Technology Selection**: Framework choices, build vs buy, vendor evaluation +- **Complex Refactoring**: Legacy modernization, performance optimization strategies +- **Process Design**: CI/CD pipelines, deployment strategies, testing approaches +- **Integration Planning**: Third-party services, API strategies, data synchronization +- **Scaling Challenges**: Performance bottlenecks, capacity planning, load distribution + +### Not Necessary For: +- Simple bug fixes +- Routine updates +- Well-established patterns +- Emergency hotfixes +- Minor feature additions + +## Integration with Intent + +When working within Intent projects: + +1. **Steel Thread Context**: Reference specific steel threads in the dialog +2. **Decision Documentation**: Output can become part of steel thread design docs +3. 
**Task Generation**: Dialog conclusions can generate backlog tasks +4. **Team Alignment**: Share dialogs for team review and input + +Example integration: +``` +Socrates: "Looking at ST0042, the authentication requirements seem complex. What's driving this?" +Plato: "The steel thread specifies three key constraints: enterprise SSO support, 15-minute token expiry for compliance, and zero-downtime migration..." +``` + +## Dialog Quality Checklist + +✓ Both personas maintain authentic, distinct voices +✓ Real challenges and concerns are addressed +✓ Trade-offs are honestly evaluated +✓ Hidden complexities are revealed +✓ Pragmatic constraints are considered +✓ Clear recommendations emerge +✓ Next steps are actionable + +## Example Dialog Template + +```markdown +# CTO Review Mode: [Technical Decision] + +## Context +[Brief description of the decision needed, referencing steel thread if applicable] + +## The Socratic Dialog + +**Socrates (CTO):** [Opening question about the fundamental problem] + +**Plato (Tech Lead):** [Comprehensive explanation of current situation and initial proposal] + +**Socrates:** [Probing question about alternatives or concerns] + +**Plato:** [Detailed analysis with multiple options] + +**Socrates:** [Question about trade-offs or hidden complexity] + +**Plato:** [Honest evaluation of pros, cons, and complexities] + +**Socrates:** [Challenge to assumptions or push for simplification] + +**Plato:** [Defense of necessary complexity, acknowledgment of simplification opportunities] + +**Socrates:** [Synthesis question - "Given these constraints, what's the pragmatic path?"] + +**Plato:** [Comprehensive recommendation with reasoning] + +## Recommendation +[Clear, actionable recommendation agreed upon by both personas] + +## Key Trade-offs +- [Trade-off 1] +- [Trade-off 2] +- [Trade-off 3] + +## Next Steps +- [ ] [Specific action item] +- [ ] [Documentation needed] +- [ ] [Follow-up required] +``` + +## Best Practices + +1. 
**Maintain Authenticity**: Each persona should sound like a real expert, not a strawman +2. **Embrace Disagreement**: The personas can disagree and work through conflicts +3. **Stay Grounded**: Reference real constraints (time, budget, skills, existing systems) +4. **Be Specific**: Use concrete examples, not abstract concepts +5. **Document Reasoning**: Capture why decisions were made, not just what was decided +6. **Keep Focus**: Don't let dialog meander; stay on the core decision +7. **Action-Oriented**: Always end with clear next steps + +## Anti-Patterns to Avoid + +- **Strawman Arguments**: Both personas should make strong, valid points +- **Predetermined Outcomes**: Let the dialog genuinely explore and discover +- **Excessive Length**: Focus on key decision points, not exhaustive coverage +- **Missing Conclusion**: Always reach a clear recommendation +- **Ignoring Reality**: Keep real-world constraints in frame +- **One-Sided Dialog**: Both personas should contribute meaningfully +- **Abstract Discussion**: Ground conversation in specific technical details + +## Advanced Techniques + +### Multi-Stakeholder Expansion +Occasionally introduce additional voices when needed: +- **Security Architect**: For security-critical decisions +- **Data Engineer**: For data architecture choices +- **DevOps Lead**: For deployment and operations concerns + +### Progressive Refinement +Run multiple dialog sessions: +1. Initial exploration (broad options) +2. Deep dive (selected approach) +3. Implementation planning (detailed execution) + +### Team Collaboration Mode +Use the dialog as a template for actual team discussions: +- Assign team members to personas +- Run live dialog sessions +- Document outcomes in Intent + +Remember: The goal is not to create perfect solutions, but to thoroughly explore the decision space and document the reasoning behind technical choices. 
This creates valuable context for future development and helps teams understand not just what was decided, but why. \ No newline at end of file diff --git a/intent/plugins/claude/subagents/socrates/metadata.json b/intent/plugins/claude/subagents/socrates/metadata.json new file mode 100644 index 0000000..2447b6e --- /dev/null +++ b/intent/plugins/claude/subagents/socrates/metadata.json @@ -0,0 +1,8 @@ +{ + "name": "socrates", + "version": "1.0.0", + "description": "CTO Review Mode - Socratic dialog for technical decision-making", + "author": "Intent Contributors", + "tools": ["Bash", "Read", "Write", "Edit", "Grep"], + "tags": ["technical-review", "decision-making", "socratic-dialog", "cto-review", "architecture", "design-review", "rubber-ducking"] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/agent.md b/intent/plugins/claude/subagents/worker-bee/agent.md new file mode 100644 index 0000000..f6bebba --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/agent.md @@ -0,0 +1,224 @@ +--- +name: worker-bee +description: Worker-Bee Driven Design specialist for Elixir applications - enforces WDD architecture patterns, validates compliance, and scaffolds WDD-compliant code +tools: Bash, Read, Write, Edit, Grep, Glob, LS +--- + +## Available Resources + +This agent includes additional resources for WDD implementation: +- Configuration patterns: `resources/config/wdd_patterns.yaml` +- Mix tasks: `resources/lib/mix/tasks/wdd/` (validate, scaffold, remap) +- Helper libraries: `resources/lib/` (project_mapper.ex, template_generator.ex, wdd_validator.ex) +- Template generators: `resources/templates/` (boundary_genserver.ex.eex, functional_core.ex.eex) +- Validation rules: `resources/validation/` (boundary_rules.ex, data_rules.ex, functional_core_rules.ex, testing_rules.ex) +- Documentation: `resources/README.md` and `resources/USER_GUIDE.md` + +Note: When referencing these files in your code generation or validation, use the 
relative path from the agent directory. + +You are a Worker-Bee Driven Design (WDD) specialist with deep expertise in building scalable, maintainable Elixir applications using the 6-layer WDD architecture. + +## Your Expertise + +You have extensive experience in: +- Worker-Bee Driven Design (WDD) 6-layer architecture: Data, Functions, Tests, Boundaries, Lifecycles, Workers +- Functional programming patterns in Elixir with pure functional cores +- OTP design patterns, GenServers, supervision trees, and process management +- Railway-Oriented Programming with `with` statements and tagged tuples +- Pattern matching, guard clauses, and idiomatic Elixir code +- Testing strategies: unit tests for functional core, integration tests for boundaries +- Framework-agnostic Elixir application design (Phoenix, OTP, libraries, Nerves) + +## Your Role - Project Structure Understanding + +**FIRST CHECK**: Always verify if a WDD project map already exists before conducting discovery. + +When working with users, you should: + +### 1. Check for Existing Project Map +- Look for `.wdd_project_map.yaml` in the project root +- If it exists, load and use the existing mapping +- Only conduct discovery if no map exists OR user explicitly requests re-mapping +- Validate existing map makes sense with current project structure + +### 2. Project Discovery and Mapping (ONLY WHEN NEEDED) +**Trigger discovery only when:** +- No `.wdd_project_map.yaml` file exists +- User explicitly requests re-mapping +- Significant project structure changes detected +- Existing map appears outdated or incorrect + +**Discovery process:** +- Scan the current project structure using file system tools +- Identify the project type (Phoenix app, OTP application, library, umbrella, etc.) 
+- Ask targeted questions about where each WDD layer should live in THEIR project +- Create a customized WDD Project Map documenting their specific structure choices +- Save this mapping for use in validation and scaffolding tasks + +### 3. Interactive Structure Definition +**Only when conducting discovery:** +Ask questions like: +- "What type of Elixir project is this?" (Phoenix, OTP, library, etc.) +- "Where would you like your functional core modules to live?" +- "How do you organize your data structures?" (separate modules vs inline structs) +- "Where should boundary/API modules be located?" +- "Do you need workers/concurrency? Where should they live?" +- "What's your testing organization preference?" +- "Are you using specific frameworks that influence structure?" (Phoenix contexts, Ash, etc.) + +### 4. Generate Project-Specific WDD Map +Create documentation like: +``` +WDD Layer Mapping for [Project Name]: +├── Data Layer: [user's chosen location] +├── Functions Layer: [user's chosen location] +├── Tests Layer: [user's chosen location] +├── Boundaries Layer: [user's chosen location] +├── Lifecycles Layer: [user's chosen location] +└── Workers Layer: [user's chosen location] + +Project Type: [Phoenix/OTP/Library/etc.] +Special Considerations: [Any framework-specific patterns] +``` + +### 5. When to Suggest Re-mapping +**Proactively suggest re-mapping when you detect:** +- Files exist outside the mapped layer directories +- New directories created that don't match the project map +- User mentions structural changes to their project +- Validation results suggest architectural drift +- Project type has changed (e.g., library became Phoenix app) + +**How to suggest re-mapping:** +- "I notice files in directories not covered by your current WDD map. Would you like to update your project structure mapping?" +- "Your project structure seems to have evolved. Should we refresh the WDD layer mapping?" 
+- "The current project map doesn't seem to match your actual structure. Would you like to re-map your layers?" + +## WDD Architecture Principles + +### The 6 Layers ("Do Fun Things with Big, Loud Worker-Bees") +1. **Data** - Immutable data structures, structs, primitive types +2. **Functions** - Pure functional core with no side effects +3. **Tests** - Unit tests for core, integration tests for boundaries +4. **Boundaries** - GenServers, APIs, side effects management +5. **Lifecycles** - OTP supervision, application startup/shutdown +6. **Workers** - Concurrency, background jobs, process pools + +### Functional Core Principles +- Pure functions with no side effects +- Single-purpose functions with clear responsibilities +- Pipeline-friendly design (data as last parameter) +- Pattern matching over conditionals +- Functions organized by purpose, not data +- Composition through pipes and tokens + +### Boundary Layer Patterns +- Separate process machinery from business logic +- Use `with` statements for Railway-Oriented Programming +- Return tagged tuples: `{:ok, result}` or `{:error, reason}` +- Prefer GenServer.call over cast for back pressure +- Validate input at boundary, not in core +- Thin APIs that delegate to functional core + +### Testing Strategies +- Test behavior, not implementation +- Unit tests for functional core (fast, simple) +- Integration tests for boundary layer +- Use fixtures and named setups +- Property-based testing for complex algorithms +- Test composition workflows + +## Available Commands + +### mix wdd.validate +Validates the project against WDD compliance using the established project map: +- Checks functional core purity (no side effects, proper composition) +- Validates boundary layer patterns (GenServers, error handling) +- Ensures proper test organization and coverage +- Identifies architectural violations and suggests fixes + +### mix wdd.scaffold +Generates WDD-compliant code following the project's established patterns: +- Creates 
new modules in correct WDD layer locations +- Generates templates following project conventions +- Scaffolds complete WDD components (data + functions + tests + boundary) +- Respects established naming and organization patterns + +## Validation Areas + +### Functional Core Validation +- No GenServer calls or process spawning +- No side effects (File I/O, network calls, logging) +- Pure function composition +- Proper error handling with tagged tuples +- Single-level abstraction per function + +### Boundary Layer Validation +- Proper GenServer patterns +- Use of `with` for error composition +- Validation at API boundaries +- Appropriate use of call vs cast +- State management separation from business logic + +### Data Layer Validation +- Proper struct definitions with default values +- Appropriate use of maps vs structs +- Flat data structures (avoid deep nesting) +- Access patterns matching data structure choice + +### Testing Validation +- Tests organized by WDD layer +- Functional core tests use simple function calls +- Boundary tests exercise process behavior +- Proper use of fixtures and setup +- Descriptive test names and organization + +## Framework Awareness + +### Phoenix Applications +- Understand contexts as boundary layers +- LiveView components as presentation boundaries +- Phoenix controllers as API boundaries +- Ecto as persistence boundary + +### OTP Applications +- GenServer supervision trees +- Application callbacks and configuration +- Process registration and discovery +- Dynamic supervisors for scalable workers + +### Libraries +- Pure functional APIs +- No process machinery (unless specifically needed) +- Clear module organization +- Comprehensive documentation and specs + +## Integration with Intent + +When working within Intent projects: +- Reference steel threads for feature context +- Document WDD decisions in appropriate steel thread docs +- Generate tasks for backlog when refactoring is needed +- Follow Intent project structure and 
conventions +- Update documentation to reflect WDD compliance progress + +## Educational Approach + +Always explain WDD principles in context: +- Show WHY separation of concerns matters +- Demonstrate how WDD reduces complexity +- Explain trade-offs of architectural decisions +- Provide examples from the user's actual codebase +- Guide gradual refactoring rather than complete rewrites + +## Quality Standards + +Ensure your responses: +- Start with project structure discovery and mapping +- Provide specific, actionable WDD compliance feedback +- Generate code that follows established project patterns +- Explain WDD principles in the context of the user's code +- Offer incremental improvement suggestions +- Maintain backward compatibility during refactoring + +Remember: Every interaction starts with understanding the user's specific project structure. Never assume a particular organization - always discover and map first, then apply WDD principles within their chosen structure. \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/metadata.json b/intent/plugins/claude/subagents/worker-bee/metadata.json new file mode 100644 index 0000000..45e3097 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/metadata.json @@ -0,0 +1,27 @@ +{ + "name": "worker-bee", + "version": "1.0.0", + "description": "Worker-Bee Driven Design specialist for any Elixir application. Conducts interactive project structure mapping, enforces WDD 6-layer architecture compliance, validates functional core purity, and scaffolds WDD-compliant code. 
Works with Phoenix, OTP, libraries, and any Elixir project type.", + "author": "thebreakincoder", + "tools": ["Bash", "Read", "Write", "Edit", "Grep", "Glob", "LS"], + "tags": [ + "elixir", + "worker-bee-driven-design", + "wdd", + "architecture", + "functional-programming", + "otp", + "genserver", + "phoenix", + "code-review", + "scaffolding", + "validation", + "testing", + "functional-core", + "boundary-layer", + "railway-oriented-programming", + "pattern-matching", + "mix-tasks", + "project-structure" + ] +} \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/README.md b/intent/plugins/claude/subagents/worker-bee/resources/README.md new file mode 100644 index 0000000..985029c --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/README.md @@ -0,0 +1,222 @@ +# Worker-Bee Agent + +A specialized Intent agent for enforcing Worker-Bee Driven Design (WDD) principles in Elixir applications. + +## Overview + +The Worker-Bee agent helps maintain architectural consistency by: + +1. **Project Structure Discovery** - Interactive mapping of your project to WDD layers +2. **WDD Compliance Validation** - Automated checking against the 6-layer architecture +3. **Code Scaffolding** - Generation of WDD-compliant modules and components +4. **Educational Guidance** - Contextual explanations of WDD principles + +## Features + +### Project Structure Mapping + +Before any validation or scaffolding, the agent conducts an interactive session to understand your specific project structure: + +- Detects project type (Phoenix, OTP, library, etc.) 
+- Maps existing code to WDD layers +- Creates a persistent project configuration +- Respects your naming conventions and organization preferences + +### WDD Layer Architecture + +Enforces the 6-layer Worker-Bee Driven Design architecture: + +- **Data** - Immutable data structures and types +- **Functions** - Pure business logic without side effects +- **Tests** - Behavior-focused testing at all layers +- **Boundaries** - GenServers, APIs, and side effect management +- **Lifecycles** - OTP supervision and application management +- **Workers** - Concurrency and background processing + +### Mix Tasks + +#### `mix wdd.validate` + +Validates project compliance against WDD principles: + +```bash +# Validate entire project +mix wdd.validate + +# Validate specific layer +mix wdd.validate --layer functions + +# Validate single file +mix wdd.validate --file lib/my_app/core/user_service.ex + +# Generate JSON report +mix wdd.validate --output json + +# Require minimum compliance score +mix wdd.validate --min-score 80.0 +``` + +#### `mix wdd.scaffold` + +Generates WDD-compliant code following your project patterns: + +```bash +# Generate functional core module +mix wdd.scaffold functional UserService + +# Generate complete WDD component +mix wdd.scaffold component UserManagement + +# Generate boundary layer +mix wdd.scaffold boundary PaymentProcessor + +# Generate data structure +mix wdd.scaffold data User + +# Dry run to preview generation +mix wdd.scaffold component OrderProcessing --dry-run +``` + +## Installation for Another Project + +To use this agent in another project: + +1. Copy the entire `intent/agents/worker-bee/` directory to your target project +2. Run `intent agents install worker-bee` in the target project +3. 
Use the agent via Claude Code's Task tool: + +``` +Task( + description="Map project structure", + prompt="Help me establish WDD layer mapping for my project and validate compliance", + subagent_type="worker-bee" +) +``` + +## Validation Rules + +### Functional Core Layer +- No side effects (no GenServer calls, file I/O, network operations) +- Pure function composition with pipes +- Single-purpose functions +- Pattern matching over conditionals +- Proper error handling with tagged tuples + +### Boundary Layer +- Proper GenServer patterns +- Railway-Oriented Programming with `with` statements +- Input validation at API boundaries +- Clear separation of client API from server implementation +- Delegation to functional core for business logic + +### Data Layer +- Immutable data structures +- Proper struct definitions with defaults +- Appropriate data structure choices +- Flat structure over deep nesting + +### Testing Layer +- Behavior-focused tests (not implementation) +- Descriptive test names +- Proper test organization with describe blocks +- Specific assertions over generic ones + +## Framework Support + +Works with any Elixir project type: + +- **Phoenix** - Web applications and APIs +- **OTP Applications** - Process-oriented systems +- **Libraries** - Pure functional libraries +- **Nerves** - Embedded systems +- **Umbrella Projects** - Multi-application systems + +## Educational Approach + +The agent provides: + +- Contextual explanations of WDD principles +- Specific recommendations for your codebase +- Incremental improvement suggestions +- Examples from your actual code +- Guidance on gradual refactoring + +## Configuration + +Project mapping is stored in `.wdd_project_map.yaml`: + +```yaml +project_name: "my_app" +project_type: phoenix_web +root_path: "/path/to/project" + +wdd_layers: + data: "lib/my_app/types" + functions: "lib/my_app_web/functional_core" + tests: "test" + boundaries: "lib/my_app_web" + lifecycles: "lib/my_app/application.ex" + workers: 
"lib/my_app/workers" + +naming_conventions: + module_prefix: "MyApp" + functional_core_suffix: "Core" +``` + +## Best Practices + +1. **Start with Discovery** - Always begin with project structure mapping +2. **Incremental Adoption** - Use WDD principles gradually, don't rewrite everything +3. **Test Behavior** - Focus on what your code does, not how it does it +4. **Keep Core Pure** - No side effects in functional core +5. **Validate Early** - Run `mix wdd.validate` regularly during development + +## Examples + +### Typical Usage Flow + +1. **Initial Setup** + ```bash + # Agent discovers your project structure + mix wdd.validate # Triggers discovery session + ``` + +2. **Generate Components** + ```bash + # Create new WDD-compliant component + mix wdd.scaffold component OrderProcessor + ``` + +3. **Validate Compliance** + ```bash + # Check compliance regularly + mix wdd.validate --min-score 75.0 + ``` + +4. **Iterative Improvement** + ```bash + # Focus on specific issues + mix wdd.validate --layer functions --verbose + ``` + +## Troubleshooting + +### No Project Map Found +Run `mix wdd.validate` to trigger interactive discovery session. + +### Validation Failures +Use `--verbose` flag to see detailed violation information and recommendations. + +### Generation Conflicts +Use `--force` flag to overwrite existing files, or `--dry-run` to preview changes. 
+ +## Contributing + +This agent follows Worker-Bee Driven Design principles in its own implementation: + +- Pure validation logic in functional core modules +- GenServer boundaries for state management +- Comprehensive test coverage +- Clear separation of concerns + +Generated by Worker-Bee Agent v1.0.0 \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/USER_GUIDE.md b/intent/plugins/claude/subagents/worker-bee/resources/USER_GUIDE.md new file mode 100644 index 0000000..dd66653 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/USER_GUIDE.md @@ -0,0 +1,563 @@ +# Worker-Bee Agent User Guide + +Complete guide for using the Worker-Bee Intent agent to enforce Worker-Bee Driven Design (WDD) in your Elixir projects. + +## Quick Start + +### 1. Install the Agent +```bash +intent agents install worker-bee +``` + +### 2. Initial Project Discovery +Run this command in your Elixir project to trigger the interactive project mapping: +```bash +mix wdd.validate +``` + +The agent will: +- Scan your project structure +- Ask targeted questions about your layer organization +- Create a `.wdd_project_map.yaml` file with your specific structure +- Remember your choices for future validations + +### 3. Daily Development Workflow +```bash +# Generate WDD-compliant components +mix wdd.scaffold component UserService + +# Validate architecture compliance +mix wdd.validate --min-score 75.0 + +# Create specific layer components +mix wdd.scaffold functional PaymentProcessor +mix wdd.scaffold boundary NotificationService +``` + +## Understanding Worker-Bee Driven Design + +### The 6 Layers + +Worker-Bee uses a mnemonic: **"Do Fun Things with Big, Loud Worker-Bees"** + +1. **Data** - Immutable structures, structs, types +2. **Functions** - Pure business logic with no side effects +3. **Tests** - Behavior-focused testing at all layers +4. **Boundaries** - GenServers, APIs, side effect management +5. 
**Lifecycles** - OTP supervision, application startup/shutdown +6. **Workers** - Concurrency, background jobs, process pools + +### Key Principles + +**Functional Core** +- No side effects (no GenServer calls, file I/O, network operations) +- Pure function composition using pipes (`|>`) +- Single-purpose functions with clear responsibilities +- Pattern matching over conditionals +- Railway-Oriented Programming with tagged tuples + +**Boundary Layer** +- Separate process machinery from business logic +- Use `with` statements for error composition +- Return `{:ok, result}` or `{:error, reason}` +- Validate input at boundaries, delegate to functional core +- Prefer `GenServer.call` over `cast` for back pressure + +## Project Mapping (One-Time Setup) + +### When Discovery Happens + +The agent **only** conducts discovery when: +- No `.wdd_project_map.yaml` file exists +- You explicitly run `mix wdd.remap` +- You use the `--remap` flag with validation/scaffolding +- The agent detects significant structural changes + +### Discovery Questions + +The agent will ask about your specific project: + +``` +What type of Elixir project is this? +[1] Phoenix Web Application +[2] Phoenix API +[3] OTP Application +[4] Library +[5] Nerves/Embedded +[6] Umbrella Project + +Where would you like your functional core modules? 
+Current structure shows: lib/my_app/, lib/my_app_web/ +Options: +[1] lib/my_app/core/ +[2] lib/my_app/business/ +[3] lib/my_app_web/functional_core/ +[custom] Enter custom path +``` + +### Example Project Map + +After discovery, you'll have a `.wdd_project_map.yaml`: + +```yaml +project_name: "my_app" +project_type: phoenix_web +root_path: "/path/to/project" + +wdd_layers: + data: "lib/my_app/types" + functions: "lib/my_app/core" + tests: "test" + boundaries: "lib/my_app_web" + lifecycles: "lib/my_app/application.ex" + workers: "lib/my_app/workers" + +naming_conventions: + module_prefix: "MyApp" + functional_core_suffix: "Core" +``` + +## Using the Agent with Claude Code + +### Basic Agent Invocation + +``` +Task( + description="Validate WDD compliance", + prompt="Review my functional core modules for purity and suggest improvements", + subagent_type="worker-bee" +) +``` + +### Specific Use Cases + +**Architecture Review** +``` +Task( + description="WDD architecture review", + prompt="Analyze my current project structure and suggest WDD layer organization. I have a Phoenix app with contexts in lib/my_app/ and web modules in lib/my_app_web/", + subagent_type="worker-bee" +) +``` + +**Code Generation** +``` +Task( + description="Generate WDD component", + prompt="Create a complete WDD component for user authentication including functional core, boundary layer, and tests", + subagent_type="worker-bee" +) +``` + +**Compliance Validation** +``` +Task( + description="Check WDD compliance", + prompt="Validate this module for functional core purity: [paste your code]. Check for side effects and suggest improvements.", + subagent_type="worker-bee" +) +``` + +**Refactoring Guidance** +``` +Task( + description="WDD refactoring advice", + prompt="I have this GenServer that's doing too much business logic. 
Help me separate concerns using WDD principles: [paste code]", + subagent_type="worker-bee" +) +``` + +## Mix Tasks Reference + +### `mix wdd.validate` + +Validates your project against WDD principles. + +```bash +# Basic validation +mix wdd.validate + +# Validate specific layer +mix wdd.validate --layer functions + +# Validate single file +mix wdd.validate --file lib/my_app/core/user_service.ex + +# Set minimum compliance score +mix wdd.validate --min-score 80.0 + +# Force re-mapping if needed +mix wdd.validate --remap + +# JSON output for CI/CD +mix wdd.validate --output json +``` + +**Example Output:** +``` +🔍 Worker-Bee WDD Validation Report +===================================== + +Project: MyApp (phoenix_web) +Overall Compliance: 78.5/100 + +✅ Data Layer (lib/my_app/types): 95/100 + - Proper struct definitions + - Good use of defaults + +⚠️ Functions Layer (lib/my_app/core): 65/100 + - VIOLATION: GenServer.call found in user_service.ex:42 + - SUGGESTION: Move side effects to boundary layer + +❌ Boundaries Layer (lib/my_app_web): 45/100 + - VIOLATION: Business logic in controller + - SUGGESTION: Extract to functional core +``` + +### `mix wdd.scaffold` + +Generates WDD-compliant code following your project conventions. 
+ +```bash +# Generate complete component +mix wdd.scaffold component UserManagement + +# Generate specific layers +mix wdd.scaffold functional PaymentProcessor +mix wdd.scaffold boundary EmailService +mix wdd.scaffold data User +mix wdd.scaffold worker BackgroundProcessor +mix wdd.scaffold supervisor TaskSupervisor + +# Dry run to preview +mix wdd.scaffold component OrderProcessing --dry-run + +# Force overwrite existing files +mix wdd.scaffold functional UserService --force +``` + +**Generated Structure:** +``` +lib/my_app/ +├── core/ +│ ├── user_management.ex # Functional core +│ └── user_management/ +│ ├── user_validator.ex +│ └── user_transformer.ex +├── types/ +│ └── user.ex # Data structures +└── boundaries/ + └── user_management_server.ex # GenServer boundary + +test/ +├── core/ +│ └── user_management_test.exs # Unit tests +└── boundaries/ + └── user_management_server_test.exs # Integration tests +``` + +### `mix wdd.remap` + +Updates your project structure mapping. + +```bash +# Interactive remapping +mix wdd.remap + +# Skip confirmation prompts +mix wdd.remap --force + +# Don't create backup +mix wdd.remap --no-backup + +# Quiet mode +mix wdd.remap --quiet +``` + +## Common Workflows + +### Starting a New Feature + +1. **Plan the Component** + ```bash + # Use agent to design the architecture + Task( + description="Design WDD component", + prompt="I need to add user notification functionality. Help me design the WDD layers and structure.", + subagent_type="worker-bee" + ) + ``` + +2. **Generate the Scaffold** + ```bash + mix wdd.scaffold component UserNotifications + ``` + +3. **Implement Business Logic** + - Focus on functional core first (pure functions) + - Add data structures as needed + - Keep side effects in boundary layer + +4. **Validate Compliance** + ```bash + mix wdd.validate --layer functions --min-score 85.0 + ``` + +### Refactoring Existing Code + +1. 
**Assess Current State** + ```bash + mix wdd.validate --file lib/my_app/problematic_module.ex + ``` + +2. **Get Refactoring Guidance** + ``` + Task( + description="WDD refactoring plan", + prompt="This module violates WDD principles: [paste code]. Provide step-by-step refactoring plan to separate concerns.", + subagent_type="worker-bee" + ) + ``` + +3. **Implement Gradually** + - Extract pure functions first + - Move side effects to boundaries + - Add proper error handling + - Update tests + +4. **Validate Improvements** + ```bash + mix wdd.validate --file lib/my_app/refactored_module.ex + ``` + +### Code Review Process + +1. **Pre-commit Validation** + ```bash + mix wdd.validate --min-score 75.0 + ``` + +2. **Agent-Assisted Review** + ``` + Task( + description="WDD code review", + prompt="Review these changes for WDD compliance: [paste diff or file]. Focus on functional core purity and boundary separation.", + subagent_type="worker-bee" + ) + ``` + +3. **Team Education** + ``` + Task( + description="Explain WDD violation", + prompt="Explain to my team why this code violates WDD principles and how to fix it: [paste code]", + subagent_type="worker-bee" + ) + ``` + +## Framework-Specific Guidance + +### Phoenix Applications + +**Contexts as Boundaries** +- Phoenix contexts naturally map to WDD boundary layer +- Keep business logic in functional core, not contexts +- Use contexts for API and side effect coordination + +**Controllers** +- Thin controllers that delegate to contexts +- Input validation and serialization only +- No business logic in controllers + +**LiveView Components** +- UI logic separate from business logic +- Event handlers delegate to contexts +- Pure functions for data transformation + +### OTP Applications + +**Supervision Trees** +- Map to WDD lifecycle layer +- Keep supervisor logic simple +- Business logic in supervised processes + +**GenServers** +- Focus on process management, not business logic +- Delegate complex operations to functional 
core +- Use `with` statements for error handling + +### Libraries + +**Pure Functional APIs** +- Emphasize functional core layer +- Minimal or no process machinery +- Clear module organization +- Comprehensive documentation + +## Best Practices + +### Do's + +✅ **Start with Data and Functions** +- Define your data structures first +- Build pure functions that transform data +- Add boundaries only when needed + +✅ **Use Agent for Architecture Decisions** +- Consult the agent when designing new components +- Ask for WDD-specific guidance +- Get explanations of violations + +✅ **Validate Regularly** +- Run `mix wdd.validate` frequently +- Set compliance score targets +- Address violations early + +✅ **Embrace the Discovery Process** +- Answer mapping questions thoughtfully +- Consider your team's conventions +- Update mapping when project evolves + +### Don'ts + +❌ **Don't Skip Project Mapping** +- Always let the agent understand your structure +- Don't assume default layouts +- Don't ignore re-mapping suggestions + +❌ **Don't Mix Concerns** +- Keep business logic out of GenServers +- Avoid side effects in functional core +- Don't put UI logic in business modules + +❌ **Don't Ignore Validation Warnings** +- Address compliance violations promptly +- Understand WHY rules exist +- Ask agent for clarification when confused + +## Troubleshooting + +### Agent Not Finding Project Map + +**Problem:** Agent keeps asking for project structure +**Solution:** +```bash +# Check if map file exists +ls -la .wdd_project_map.yaml + +# If missing, run discovery +mix wdd.validate + +# If corrupted, re-map +mix wdd.remap +``` + +### Low Compliance Scores + +**Problem:** Validation shows low scores +**Solution:** +```bash +# Get detailed feedback +mix wdd.validate --verbose + +# Ask agent for specific help +Task( + description="Fix WDD violations", + prompt="My compliance score is low. 
Help me understand and fix these specific violations: [paste validation output]", + subagent_type="worker-bee" +) +``` + +### Generated Code Doesn't Match Project + +**Problem:** Scaffolded code doesn't follow your patterns +**Solution:** +```bash +# Update project mapping +mix wdd.remap + +# Verify layer paths are correct +cat .wdd_project_map.yaml + +# Regenerate with updated mapping +mix wdd.scaffold component MyComponent --force +``` + +### Agent Seems Confused About Project + +**Problem:** Agent suggestions don't fit your project type +**Solution:** +``` +Task( + description="Update project understanding", + prompt="My project structure has changed significantly. It's now a [Phoenix app/OTP app/library] with [describe structure]. Please help me re-map the WDD layers.", + subagent_type="worker-bee" +) +``` + +## Advanced Usage + +### CI/CD Integration + +```bash +# In your CI pipeline +mix wdd.validate --output json --min-score 70.0 +if [ $? -ne 0 ]; then + echo "WDD compliance below threshold" + exit 1 +fi +``` + +### Team Adoption Strategy + +1. **Start with New Code** + - Use agent for all new components + - Don't refactor everything at once + - Set compliance targets gradually + +2. **Education Focus** + - Use agent to explain violations + - Share WDD principles with team + - Review generated code together + +3. **Gradual Migration** + - Identify high-impact violations first + - Refactor incrementally + - Measure compliance improvement + +### Custom Templates + +The agent uses EEx templates that can be customized: +- `templates/functional_core.ex.eex` +- `templates/boundary_genserver.ex.eex` +- Add your own templates to match team conventions + +## Getting Help + +### Agent Assistance + +The worker-bee agent is designed to be educational. Always ask for explanations: + +``` +Task( + description="Explain WDD concept", + prompt="I don't understand why [specific pattern] violates WDD principles. 
Can you explain the reasoning and show me the correct approach?", + subagent_type="worker-bee" +) +``` + +### Common Questions + +**Q: How do I handle database operations in functional core?** +A: You don't. Database operations are side effects that belong in the boundary layer. Pass data to functional core, return instructions for what to persist. + +**Q: Can I use Logger in functional core?** +A: No. Logging is a side effect. Return success/error tuples and let boundary layer handle logging. + +**Q: What about configuration access?** +A: Pass configuration as parameters to functional core functions. Don't access Application config directly. + +**Q: How do I test GenServer behavior?** +A: Integration tests in boundary layer test the process behavior. Unit tests in functional core test business logic. + +Remember: The worker-bee agent is here to help you understand and apply WDD principles. Don't hesitate to ask for clarification, examples, or step-by-step guidance for any WDD concept. \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/config/wdd_patterns.yaml b/intent/plugins/claude/subagents/worker-bee/resources/config/wdd_patterns.yaml new file mode 100644 index 0000000..12ae14c --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/config/wdd_patterns.yaml @@ -0,0 +1,156 @@ +# Worker-Bee Driven Design Pattern Definitions +# Used by the validation engine to identify WDD compliance + +functional_core_patterns: + pure_function_indicators: + - "def.*->.*" + - "defp.*->.*" + - "|>" + - "with.*<-" + + side_effect_violations: + - "GenServer\\." + - "Agent\\." + - "Task\\." + - "spawn" + - "Process\\." + - "File\\." + - "IO\\." + - "Logger\\." + - "Repo\\." + - "HTTPoison\\." + - "Tesla\\." 
+ + composition_patterns: + - "\\|>" + - "with.*<-" + - "\\{:ok,.*\\}" + - "\\{:error,.*\\}" + +boundary_layer_patterns: + genserver_structure: + required_callbacks: + - "def init" + recommended_callbacks: + - "def handle_call" + - "def handle_cast" + - "def handle_info" + + error_handling: + - "with.*<-" + - "\\{:ok,.*\\}" + - "\\{:error,.*\\}" + - "else" + + api_design: + - "@spec" + - "def start_link" + - "GenServer\\.call" + +data_layer_patterns: + struct_patterns: + - "defstruct" + - "@type.*::" + - "\\%\\{.*\\|.*\\}" + + immutability_indicators: + - "\\%\\{.*\\|.*\\}" + - "Map\\.put" + - "struct\\(" + + anti_patterns: + - "\\%\\{.*\\%\\{.*\\%\\{" # Deep nesting + +testing_patterns: + organization: + - "describe.*do" + - "setup.*do" + - "test.*do" + + behavior_focus: + - "assert.*==.*" + - "refute.*==.*" + - "assert_receive" + + anti_patterns: + - "assert true" + - "assert false" + - "private_function" + +worker_patterns: + concurrency_indicators: + - "use GenServer" + - "Task\\." + - "Supervisor" + - "DynamicSupervisor" + + background_processing: + - "handle_cast" + - "handle_info" + - "Process\\.send_after" + +lifecycle_patterns: + supervision: + - "use Supervisor" + - "use Application" + - "children.*=" + - "Supervisor\\.start_link" + + application_structure: + - "def start" + - "def stop" + - "child_spec" + +complexity_thresholds: + function_complexity: + low: 3 + medium: 7 + high: 10 + + module_responsibilities: + max_responsibilities: 3 + + function_parameters: + max_parameters: 4 + +project_type_indicators: + phoenix_web: + - ":phoenix" + - "Phoenix\\." + - "router\\.ex" + - "endpoint\\.ex" + - "_web" + + phoenix_api: + - ":phoenix" + - "Phoenix\\." 
+ - "router\\.ex" + - "api" + + otp_application: + - "use Application" + - "use Supervisor" + - "GenServer" + + library: + - "defmodule.*do" + - "def.*" + - "!.*Application" + - "!.*Supervisor" + +naming_conventions: + module_naming: + - "^[A-Z][a-zA-Z0-9]*$" + + function_naming: + - "^[a-z_][a-z0-9_]*[?!]?$" + + test_naming: + - ".*_test\\.exs$" + + avoid_names: + - "temp" + - "tmp" + - "test" + - "foo" + - "bar" \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/remap.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/remap.ex new file mode 100644 index 0000000..d381162 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/remap.ex @@ -0,0 +1,211 @@ +defmodule Mix.Tasks.Wdd.Remap do + @moduledoc """ + Mix task for re-mapping Worker-Bee Driven Design project structure. + + This task allows you to update your project's WDD layer mapping when + your project structure has evolved or when you want to reorganize + your WDD layer assignments. 
+ + ## Usage + + mix wdd.remap [options] + + ## Options + + * `--path` - Path to project directory (defaults to current directory) + * `--backup` - Create backup of existing project map (default: true) + * `--force` - Skip confirmation prompts + * `--quiet` - Minimal output + + ## Examples + + # Re-map project structure interactively + mix wdd.remap + + # Re-map without backup + mix wdd.remap --no-backup + + # Re-map with no prompts + mix wdd.remap --force + + ## When to Re-map + + Consider re-mapping when: + - You've reorganized your project directories + - You've changed from one project type to another (e.g., library to Phoenix app) + - You've added new layers or changed layer organization + - WDD validation suggests your map is outdated + - You want to adopt different naming conventions + + ## Backup and Recovery + + By default, this task creates a backup of your existing project map: + - Backup saved as `.wdd_project_map.yaml.backup` + - Use the backup to restore if needed + - Backups are timestamped if multiple backups exist + + Generated by Worker-Bee Agent + """ + + use Mix.Task + + alias WorkerBee.ProjectMapper + + @shortdoc "Re-maps Worker-Bee Driven Design project structure" + + @switches [ + path: :string, + backup: :boolean, + force: :boolean, + quiet: :boolean, + help: :boolean + ] + + @aliases [ + p: :path, + f: :force, + q: :quiet, + h: :help + ] + + @impl true + def run(args) do + {opts, _} = OptionParser.parse!(args, switches: @switches, aliases: @aliases) + + if opts[:help] do + show_help() + else + remap_project(opts) + end + end + + defp remap_project(opts) do + project_path = opts[:path] || File.cwd!() + + unless opts[:quiet] do + Mix.shell().info("🔄 Worker-Bee WDD Project Re-mapping") + Mix.shell().info("=" |> String.duplicate(40)) + end + + with :ok <- confirm_remapping(opts), + :ok <- backup_existing_map(project_path, opts), + {:ok, project_map} <- perform_discovery(project_path, opts), + :ok <- save_new_map(project_map, project_path, opts) 
do + + unless opts[:quiet] do + Mix.shell().info("\n✅ Project re-mapping completed successfully!") + display_new_mapping(project_map) + display_next_steps() + end + else + :cancelled -> + unless opts[:quiet] do + Mix.shell().info("Re-mapping cancelled.") + end + + {:error, reason} -> + Mix.shell().error("❌ Re-mapping failed: #{reason}") + System.halt(1) + end + end + + defp confirm_remapping(opts) do + if opts[:force] or opts[:quiet] do + :ok + else + Mix.shell().info("\n⚠️ This will replace your current WDD project mapping.") + + if Mix.shell().yes?("Continue with re-mapping?") do + :ok + else + :cancelled + end + end + end + + defp backup_existing_map(project_path, opts) do + map_file = Path.join(project_path, ".wdd_project_map.yaml") + backup_enabled = Keyword.get(opts, :backup, true) + + if File.exists?(map_file) and backup_enabled do + backup_file = generate_backup_filename(project_path) + + case File.copy(map_file, backup_file) do + {:ok, _} -> + unless opts[:quiet] do + Mix.shell().info("📦 Existing map backed up to #{Path.basename(backup_file)}") + end + :ok + + {:error, reason} -> + {:error, "Failed to create backup: #{reason}"} + end + else + :ok + end + end + + defp generate_backup_filename(project_path) do + base_backup = Path.join(project_path, ".wdd_project_map.yaml.backup") + + if File.exists?(base_backup) do + timestamp = DateTime.utc_now() |> DateTime.to_unix() + Path.join(project_path, ".wdd_project_map.yaml.backup.#{timestamp}") + else + base_backup + end + end + + defp perform_discovery(project_path, opts) do + unless opts[:quiet] do + Mix.shell().info("\n🔍 Starting project structure discovery...") + end + + ProjectMapper.discover_project_structure(project_path) + end + + defp save_new_map(project_map, project_path, opts) do + map_file = Path.join(project_path, ".wdd_project_map.yaml") + + case ProjectMapper.save_project_map(project_map, map_file) do + {:ok, _message} -> + unless opts[:quiet] do + Mix.shell().info("💾 New project map saved to 
#{Path.basename(map_file)}") + end + :ok + + {:error, reason} -> + {:error, "Failed to save project map: #{reason}"} + end + end + + defp display_new_mapping(project_map) do + Mix.shell().info("\n📋 New WDD Layer Mapping:") + Mix.shell().info("Project: #{project_map.project_name} (#{project_map.project_type})") + + Enum.each(project_map.layer_paths, fn {layer, path} -> + Mix.shell().info(" #{format_layer_name(layer)}: #{path}") + end) + end + + defp display_next_steps do + Mix.shell().info("\n📋 Next Steps:") + Mix.shell().info(" 1. Run 'mix wdd.validate' to check compliance with new mapping") + Mix.shell().info(" 2. Use 'mix wdd.scaffold' to generate code following new structure") + Mix.shell().info(" 3. Update existing code to match new layer organization if needed") + + Mix.shell().info("\n💡 Pro Tip:") + Mix.shell().info(" Your old mapping is backed up - you can restore it if needed") + end + + defp format_layer_name(layer) do + layer + |> Atom.to_string() + |> String.capitalize() + |> String.pad_trailing(10) + end + + defp show_help do + Mix.shell().info(@moduledoc) + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/scaffold.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/scaffold.ex new file mode 100644 index 0000000..e161060 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/scaffold.ex @@ -0,0 +1,399 @@ +defmodule Mix.Tasks.Wdd.Scaffold do + @moduledoc """ + Mix task for scaffolding Worker-Bee Driven Design compliant modules. + + This task generates WDD-compliant Elixir modules based on your project's + established structure and conventions. It creates properly organized code + following the 6-layer WDD architecture. 
+ + ## Usage + + mix wdd.scaffold TYPE NAME [options] + + ## Types + + * `functional` - Generate functional core module + * `boundary` - Generate boundary layer (GenServer + API) + * `data` - Generate data structure module + * `worker` - Generate worker process + * `supervisor` - Generate supervisor module + * `component` - Generate complete WDD component (all layers) + + ## Options + + * `--path` - Target project directory (defaults to current directory) + * `--module-prefix` - Override module prefix from project map + * `--no-tests` - Skip test file generation + * `--no-docs` - Skip documentation generation + * `--dry-run` - Show what would be generated without creating files + * `--force` - Overwrite existing files + * `--quiet` - Minimal output + * `--remap` - Force re-mapping of project structure before scaffolding + * `--force-discovery` - Alias for --remap + + ## Examples + + # Generate functional core module + mix wdd.scaffold functional UserService + + # Generate complete WDD component + mix wdd.scaffold component UserManagement + + # Generate boundary layer with custom options + mix wdd.scaffold boundary PaymentProcessor --force + + # Generate data structure + mix wdd.scaffold data User + + # Dry run to see what would be generated + mix wdd.scaffold component OrderProcessing --dry-run + + ## Project Structure Discovery + + This task checks for an existing WDD project map (.wdd_project_map.yaml) first: + + - If found, uses the existing mapping for scaffolding + - If not found, guides you through interactive mapping to establish structure + - Use --remap to force re-discovery even when a map exists + + Generated code follows the established project structure and conventions. 
+ + ## Component Types Details + + ### functional + Creates a pure functional core module with: + - Business logic functions + - Type specifications + - Documentation + - Pure function patterns + - Corresponding tests + + ### boundary + Creates boundary layer modules with: + - GenServer for state management + - API module for clean interface + - Error handling with 'with' statements + - Integration tests + + ### data + Creates data structure module with: + - Struct definition with defaults + - Constructor and update functions + - Validation functions + - Type specifications + + ### worker + Creates worker process with: + - Background job processing + - Queue management + - Concurrent work handling + - OTP compliance + + ### supervisor + Creates supervisor module with: + - Child process management + - Restart strategies + - Dynamic child handling + + ### component + Creates complete WDD component with: + - Data layer (structs, types) + - Functional core (business logic) + - Boundary layer (GenServer + API) + - Comprehensive tests + - All properly organized in WDD layers + + Generated by Worker-Bee Agent + """ + + use Mix.Task + + alias WorkerBee.{ProjectMapper, TemplateGenerator} + + @shortdoc "Scaffolds Worker-Bee Driven Design compliant modules" + + @component_types ~w(functional boundary data worker supervisor component) + + @switches [ + path: :string, + module_prefix: :string, + no_tests: :boolean, + no_docs: :boolean, + dry_run: :boolean, + force: :boolean, + quiet: :boolean, + remap: :boolean, + force_discovery: :boolean, + help: :boolean + ] + + @aliases [ + p: :path, + m: :module_prefix, + d: :dry_run, + f: :force, + q: :quiet, + r: :remap, + h: :help + ] + + @impl true + def run([]) do + show_help() + end + + @impl true + def run(args) do + {opts, args} = OptionParser.parse!(args, switches: @switches, aliases: @aliases) + + if opts[:help] do + show_help() + else + case args do + [type, name | _] when type in @component_types -> + scaffold_component(type, 
name, opts) + + [type, _name | _] -> + Mix.shell().error("❌ Unknown component type: #{type}") + Mix.shell().info("Available types: #{Enum.join(@component_types, ", ")}") + System.halt(1) + + [type] when type in @component_types -> + Mix.shell().error("❌ Component name required") + Mix.shell().info("Usage: mix wdd.scaffold #{type} MyComponentName") + System.halt(1) + + _ -> + show_help() + System.halt(1) + end + end + end + + defp scaffold_component(type, name, opts) do + project_path = opts[:path] || File.cwd!() + + unless opts[:quiet] do + Mix.shell().info("🐝 Worker-Bee WDD Scaffolding") + Mix.shell().info("=" |> String.duplicate(35)) + Mix.shell().info("Type: #{type}") + Mix.shell().info("Name: #{name}") + end + + with {:ok, project_map} <- ensure_project_map(project_path, opts), + {:ok, generated_files} <- generate_component(type, name, project_map, opts), + :ok <- create_files(generated_files, opts) do + + unless opts[:quiet] do + Mix.shell().info("\n✅ Scaffolding completed successfully!") + Mix.shell().info("Generated #{length(generated_files)} file(s):") + + Enum.each(generated_files, fn file_path -> + Mix.shell().info(" • #{file_path}") + end) + + display_next_steps(type, name, opts) + end + else + {:error, reason} -> + Mix.shell().error("❌ Scaffolding failed: #{reason}") + System.halt(1) + end + end + + defp ensure_project_map(project_path, opts) do + map_file = Path.join(project_path, ".wdd_project_map.yaml") + force_remap = opts[:remap] || opts[:force_discovery] + + cond do + force_remap -> + unless opts[:quiet] do + Mix.shell().info("🔄 Re-mapping project structure as requested...") + end + perform_discovery(project_path, map_file, opts) + + File.exists?(map_file) -> + unless opts[:quiet] do + Mix.shell().info("📋 Using existing WDD project map") + end + ProjectMapper.load_project_map(map_file) + + true -> + unless opts[:quiet] do + Mix.shell().info("📂 No WDD project map found. 
Starting discovery session...") + end + perform_discovery(project_path, map_file, opts) + end + end + + defp perform_discovery(project_path, map_file, opts) do + case ProjectMapper.discover_project_structure(project_path) do + {:ok, project_map} -> + ProjectMapper.save_project_map(project_map, map_file) + unless opts[:quiet] do + Mix.shell().info("✅ Project map created at #{map_file}") + end + {:ok, project_map} + + error -> + error + end + end + + defp generate_component(type, name, project_map, opts) do + component_type = String.to_atom(type) + options = build_generation_options(opts) + + case TemplateGenerator.scaffold_component(".", name, component_type, options) do + {:ok, file_paths} -> {:ok, file_paths} + {:error, reason} -> {:error, reason} + end + end + + defp build_generation_options(opts) do + options = %{} + + options = if opts[:module_prefix] do + Map.put(options, :module_prefix, opts[:module_prefix]) + else + options + end + + options = Map.put(options, :with_tests, not opts[:no_tests]) + options = Map.put(options, :with_docs, not opts[:no_docs]) + + options + end + + defp create_files(file_paths, opts) do + cond do + opts[:dry_run] -> + display_dry_run_results(file_paths, opts) + :ok + + true -> + create_actual_files(file_paths, opts) + end + end + + defp display_dry_run_results(file_paths, opts) do + unless opts[:quiet] do + Mix.shell().info("\n🔍 Dry Run - Files that would be generated:") + + Enum.each(file_paths, fn file_path -> + status = if File.exists?(file_path) do + "📝 [OVERWRITE]" + else + "📄 [NEW]" + end + + Mix.shell().info(" #{status} #{file_path}") + end) + + Mix.shell().info("\nRun without --dry-run to create these files.") + end + end + + defp create_actual_files(file_paths, opts) do + conflicts = check_for_conflicts(file_paths, opts[:force]) + + case conflicts do + [] -> + write_files(file_paths, opts) + :ok + + conflict_files -> + handle_conflicts(conflict_files, opts) + end + end + + defp check_for_conflicts(file_paths, force?) 
do + if force? do + [] + else + Enum.filter(file_paths, &File.exists?/1) + end + end + + defp handle_conflicts(conflict_files, opts) do + unless opts[:quiet] do + Mix.shell().error("\n⚠️ File conflicts detected:") + Enum.each(conflict_files, fn file -> + Mix.shell().error(" • #{file}") + end) + Mix.shell().info("\nUse --force to overwrite existing files") + end + + {:error, "File conflicts detected. Use --force to overwrite."} + end + + defp write_files(file_paths, opts) do + unless opts[:quiet] do + Mix.shell().info("\n📝 Creating files...") + end + + Enum.each(file_paths, fn file_path -> + File.mkdir_p!(Path.dirname(file_path)) + unless opts[:quiet] do + Mix.shell().info(" ✓ #{file_path}") + end + end) + end + + defp display_next_steps(type, name, opts) do + unless opts[:quiet] do + Mix.shell().info("\n📋 Next Steps:") + + case type do + "functional" -> + Mix.shell().info(" 1. Implement business logic in #{name}") + Mix.shell().info(" 2. Add type specifications") + Mix.shell().info(" 3. Run tests: mix test") + Mix.shell().info(" 4. Validate WDD compliance: mix wdd.validate") + + "boundary" -> + Mix.shell().info(" 1. Define GenServer state and callbacks") + Mix.shell().info(" 2. Implement API functions") + Mix.shell().info(" 3. Add to supervision tree") + Mix.shell().info(" 4. Run integration tests") + Mix.shell().info(" 5. Validate WDD compliance: mix wdd.validate") + + "data" -> + Mix.shell().info(" 1. Define struct fields and defaults") + Mix.shell().info(" 2. Implement validation functions") + Mix.shell().info(" 3. Add type specifications") + Mix.shell().info(" 4. Run tests: mix test") + + "worker" -> + Mix.shell().info(" 1. Implement work processing logic") + Mix.shell().info(" 2. Add to supervision tree") + Mix.shell().info(" 3. Configure job queue") + Mix.shell().info(" 4. Test concurrent processing") + + "supervisor" -> + Mix.shell().info(" 1. Define child specifications") + Mix.shell().info(" 2. Configure restart strategies") + Mix.shell().info(" 3. 
Add to application supervision tree") + Mix.shell().info(" 4. Test failure scenarios") + + "component" -> + Mix.shell().info(" 1. Implement data structures") + Mix.shell().info(" 2. Add business logic to functional core") + Mix.shell().info(" 3. Configure boundary layer") + Mix.shell().info(" 4. Add to supervision tree") + Mix.shell().info(" 5. Run full test suite") + Mix.shell().info(" 6. Validate WDD compliance: mix wdd.validate") + end + + Mix.shell().info("\n💡 Pro Tips:") + Mix.shell().info(" • Use 'mix wdd.validate' to check compliance") + Mix.shell().info(" • Follow Railway-Oriented Programming patterns") + Mix.shell().info(" • Keep functional core pure (no side effects)") + Mix.shell().info(" • Test behavior, not implementation") + end + end + + defp show_help do + Mix.shell().info(@moduledoc) + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/validate.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/validate.ex new file mode 100644 index 0000000..b662203 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/mix/tasks/wdd/validate.ex @@ -0,0 +1,475 @@ +defmodule Mix.Tasks.Wdd.Validate do + @moduledoc """ + Mix task for validating Worker-Bee Driven Design compliance. + + This task analyzes your Elixir project against WDD principles and provides + detailed feedback on compliance issues and recommendations. 
+ + ## Usage + + mix wdd.validate [options] + + ## Options + + * `--path` - Path to project directory (defaults to current directory) + * `--layer` - Validate specific WDD layer only (data, functions, tests, boundaries, lifecycles, workers) + * `--file` - Validate specific file only + * `--output` - Output format: text (default), json, html + * `--min-score` - Minimum compliance score threshold (0.0-100.0) + * `--strict` - Treat warnings as errors + * `--quiet` - Only show violations and summary + * `--verbose` - Show detailed analysis information + * `--remap` - Force re-mapping of project structure before validation + * `--force-discovery` - Alias for --remap + + ## Examples + + # Validate entire project + mix wdd.validate + + # Validate only functional core layer + mix wdd.validate --layer functions + + # Validate specific file + mix wdd.validate --file lib/my_app/core/user_service.ex + + # Generate JSON report + mix wdd.validate --output json + + # Require minimum 80% compliance score + mix wdd.validate --min-score 80.0 + + # Force re-mapping project structure + mix wdd.validate --remap + + ## Project Structure Discovery + + This task checks for an existing WDD project map (.wdd_project_map.yaml) first: + + - If found, uses the existing mapping (shows "📋 Using existing WDD project map") + - If not found, guides you through interactive mapping to establish structure + - Use --remap to force re-discovery even when a map exists + + The mapping is saved and reused for future validations unless explicitly re-mapped. 
+ + Generated by Worker-Bee Agent + """ + + use Mix.Task + + alias WorkerBee.{ProjectMapper, WddValidator} + + @shortdoc "Validates Worker-Bee Driven Design compliance" + + @switches [ + path: :string, + layer: :string, + file: :string, + output: :string, + min_score: :float, + strict: :boolean, + quiet: :boolean, + verbose: :boolean, + remap: :boolean, + force_discovery: :boolean, + help: :boolean + ] + + @aliases [ + p: :path, + l: :layer, + f: :file, + o: :output, + m: :min_score, + s: :strict, + q: :quiet, + v: :verbose, + r: :remap, + h: :help + ] + + @impl true + def run(args) do + {opts, _} = OptionParser.parse!(args, switches: @switches, aliases: @aliases) + + if opts[:help] do + show_help() + else + validate_project(opts) + end + end + + defp validate_project(opts) do + project_path = opts[:path] || File.cwd!() + + Mix.shell().info("🐝 Worker-Bee WDD Validation") + Mix.shell().info("=" |> String.duplicate(40)) + + with {:ok, project_map} <- ensure_project_map(project_path, opts), + {:ok, validation_result} <- run_validation(project_path, project_map, opts), + :ok <- check_compliance_threshold(validation_result, opts) do + + output_results(validation_result, opts) + + if has_violations?(validation_result, opts[:strict]) do + System.halt(1) + end + else + {:error, reason} -> + Mix.shell().error("❌ Validation failed: #{reason}") + System.halt(1) + + {:compliance_failure, score, threshold} -> + Mix.shell().error("❌ Compliance score #{score}% below threshold #{threshold}%") + System.halt(1) + end + end + + defp ensure_project_map(project_path, opts \\ []) do + map_file = Path.join(project_path, ".wdd_project_map.yaml") + force_remap = opts[:remap] || opts[:force_discovery] + + cond do + force_remap -> + unless opts[:quiet] do + Mix.shell().info("🔄 Re-mapping project structure as requested...") + end + perform_discovery(project_path, map_file, opts) + + File.exists?(map_file) -> + unless opts[:quiet] do + Mix.shell().info("📋 Using existing WDD project map") + end 
+ ProjectMapper.load_project_map(map_file) + + true -> + unless opts[:quiet] do + Mix.shell().info("📂 No WDD project map found. Starting discovery session...") + end + perform_discovery(project_path, map_file, opts) + end + end + + defp perform_discovery(project_path, map_file, opts) do + case ProjectMapper.discover_project_structure(project_path) do + {:ok, project_map} -> + ProjectMapper.save_project_map(project_map, map_file) + unless opts[:quiet] do + Mix.shell().info("✅ Project map created at #{map_file}") + end + {:ok, project_map} + + error -> + error + end + end + + defp run_validation(project_path, project_map, opts) do + cond do + opts[:file] -> + validate_single_file(opts[:file], project_map, opts) + + opts[:layer] -> + validate_layer(project_path, project_map, opts[:layer], opts) + + true -> + validate_entire_project(project_path, project_map, opts) + end + end + + defp validate_single_file(file_path, project_map, opts) do + if not opts[:quiet] do + Mix.shell().info("🔍 Validating file: #{file_path}") + end + + case File.exists?(file_path) do + true -> + result = WddValidator.validate_file(file_path, project_map) + + validation_result = %WddValidator{ + project_map: project_map, + validation_results: [result], + compliance_score: result.score, + violations: result.violations, + recommendations: result.recommendations + } + + {:ok, validation_result} + + false -> + {:error, "File not found: #{file_path}"} + end + end + + defp validate_layer(project_path, project_map, layer_name, opts) do + layer_atom = String.to_existing_atom(layer_name) + + if not opts[:quiet] do + Mix.shell().info("🔍 Validating layer: #{layer_name}") + end + + layer_path = Map.get(project_map.layer_paths, layer_atom) + + if layer_path do + full_layer_path = Path.join(project_path, layer_path) + + case validate_directory(full_layer_path, project_map, opts) do + {:ok, validation_result} -> {:ok, validation_result} + error -> error + end + else + {:error, "Layer '#{layer_name}' not found 
in project map"} + end + end + + defp validate_entire_project(project_path, project_map, opts) do + if not opts[:quiet] do + Mix.shell().info("🔍 Validating entire project...") + end + + WddValidator.validate_project(project_path) + end + + defp validate_directory(directory_path, project_map, _opts) do + if File.dir?(directory_path) do + elixir_files = + directory_path + |> Path.join("**/*.{ex,exs}") + |> Path.wildcard() + + validation_results = Enum.map(elixir_files, fn file_path -> + WddValidator.validate_file(file_path, project_map) + end) + + compliance_score = calculate_average_score(validation_results) + violations = Enum.flat_map(validation_results, & &1.violations) + recommendations = Enum.flat_map(validation_results, & &1.recommendations) + + result = %WddValidator{ + project_map: project_map, + validation_results: validation_results, + compliance_score: compliance_score, + violations: violations, + recommendations: recommendations + } + + {:ok, result} + else + {:error, "Directory not found: #{directory_path}"} + end + end + + defp check_compliance_threshold(validation_result, opts) do + case opts[:min_score] do + nil -> :ok + threshold when is_float(threshold) -> + if validation_result.compliance_score >= threshold do + :ok + else + {:compliance_failure, validation_result.compliance_score, threshold} + end + end + end + + defp output_results(validation_result, opts) do + case opts[:output] do + "json" -> output_json(validation_result, opts) + "html" -> output_html(validation_result, opts) + _ -> output_text(validation_result, opts) + end + end + + defp output_text(validation_result, opts) do + unless opts[:quiet] do + Mix.shell().info("\n📊 Validation Results") + Mix.shell().info("=" |> String.duplicate(30)) + + Mix.shell().info("Files analyzed: #{length(validation_result.validation_results)}") + Mix.shell().info("Compliance score: #{Float.round(validation_result.compliance_score, 1)}%") + Mix.shell().info("Total violations: 
#{length(validation_result.violations)}") + end + + if not Enum.empty?(validation_result.violations) do + output_violations(validation_result.violations, opts) + end + + if not Enum.empty?(validation_result.recommendations) and not opts[:quiet] do + output_recommendations(validation_result.recommendations, opts) + end + + output_summary(validation_result, opts) + end + + defp output_violations(violations, opts) do + unless opts[:quiet] do + Mix.shell().info("\n🚨 Violations Found") + Mix.shell().info("-" |> String.duplicate(20)) + end + + violations + |> Enum.group_by(& &1.severity) + |> Enum.each(fn {severity, severity_violations} -> + output_violations_by_severity(severity, severity_violations, opts) + end) + end + + defp output_violations_by_severity(severity, violations, opts) do + severity_icon = case severity do + :error -> "🔴" + :warning -> "🟡" + :info -> "🔵" + end + + unless opts[:quiet] do + Mix.shell().info("\n#{severity_icon} #{String.upcase(to_string(severity))} (#{length(violations)})") + end + + violations + |> Enum.take(if opts[:verbose], do: length(violations), else: 10) + |> Enum.each(fn violation -> + location = if violation.line, do: ":#{violation.line}", else: "" + file_location = "#{violation.file}#{location}" + + message = if opts[:verbose] do + " #{file_location}\n #{violation.message}\n Rule: #{violation.rule}" + else + " #{file_location}: #{violation.message}" + end + + case severity do + :error -> Mix.shell().error(message) + :warning -> Mix.shell().info(message) + :info -> Mix.shell().info(message) + end + end) + + if not opts[:verbose] and length(violations) > 10 do + Mix.shell().info(" ... and #{length(violations) - 10} more") + end + end + + defp output_recommendations(recommendations, _opts) do + Mix.shell().info("\n💡 Recommendations") + Mix.shell().info("-" |> String.duplicate(20)) + + recommendations + |> Enum.uniq() + |> Enum.with_index(1) + |> Enum.each(fn {recommendation, index} -> + Mix.shell().info("#{index}. 
#{recommendation}") + end) + end + + defp output_summary(validation_result, opts) do + score = validation_result.compliance_score + violations = validation_result.violations + + error_count = Enum.count(violations, & &1.severity == :error) + warning_count = Enum.count(violations, & &1.severity == :warning) + info_count = Enum.count(violations, & &1.severity == :info) + + unless opts[:quiet] do + Mix.shell().info("\n📋 Summary") + Mix.shell().info("-" |> String.duplicate(10)) + Mix.shell().info("Compliance Score: #{Float.round(score, 1)}%") + Mix.shell().info("Errors: #{error_count}") + Mix.shell().info("Warnings: #{warning_count}") + Mix.shell().info("Info: #{info_count}") + end + + cond do + error_count > 0 -> + Mix.shell().error("\n❌ Validation failed with #{error_count} error(s)") + + warning_count > 0 and opts[:strict] -> + Mix.shell().error("\n❌ Validation failed with #{warning_count} warning(s) (strict mode)") + + score >= 90.0 -> + Mix.shell().info("\n✅ Excellent WDD compliance!") + + score >= 75.0 -> + Mix.shell().info("\n✅ Good WDD compliance") + + score >= 50.0 -> + Mix.shell().info("\n⚠️ Moderate WDD compliance - consider improvements") + + true -> + Mix.shell().error("\n❌ Poor WDD compliance - refactoring recommended") + end + end + + defp output_json(validation_result, _opts) do + json_output = %{ + compliance_score: validation_result.compliance_score, + files_analyzed: length(validation_result.validation_results), + total_violations: length(validation_result.violations), + violations_by_severity: count_violations_by_severity(validation_result.violations), + violations: format_violations_for_json(validation_result.violations), + recommendations: validation_result.recommendations, + summary: generate_summary_text(validation_result) + } + + Mix.shell().info(Jason.encode!(json_output, pretty: true)) + end + + defp output_html(_validation_result, _opts) do + Mix.shell().info("HTML output not yet implemented") + end + + defp 
has_violations?(validation_result, strict_mode) do + violations = validation_result.violations + + error_count = Enum.count(violations, & &1.severity == :error) + warning_count = Enum.count(violations, & &1.severity == :warning) + + error_count > 0 or (strict_mode and warning_count > 0) + end + + defp calculate_average_score(validation_results) do + if Enum.empty?(validation_results) do + 0.0 + else + total_score = Enum.reduce(validation_results, 0.0, fn result, acc -> + acc + result.score + end) + + total_score / length(validation_results) + end + end + + defp count_violations_by_severity(violations) do + violations + |> Enum.group_by(& &1.severity) + |> Map.new(fn {severity, violations_list} -> + {severity, length(violations_list)} + end) + end + + defp format_violations_for_json(violations) do + Enum.map(violations, fn violation -> + %{ + type: violation.type, + severity: violation.severity, + file: violation.file, + line: violation.line, + message: violation.message, + rule: violation.rule + } + end) + end + + defp generate_summary_text(validation_result) do + score = validation_result.compliance_score + + cond do + score >= 90.0 -> "Excellent WDD compliance" + score >= 75.0 -> "Good WDD compliance" + score >= 50.0 -> "Moderate WDD compliance" + true -> "Poor WDD compliance" + end + end + + defp show_help do + Mix.shell().info(@moduledoc) + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/project_mapper.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/project_mapper.ex new file mode 100644 index 0000000..5f75ff0 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/project_mapper.ex @@ -0,0 +1,529 @@ +defmodule WorkerBee.ProjectMapper do + @moduledoc """ + Interactive project structure discovery and WDD layer mapping. + + This module conducts discovery sessions to understand how a specific + Elixir project should be organized according to WDD principles. 
+ """ + + @project_types [ + :phoenix_web, + :phoenix_api, + :otp_application, + :library, + :nerves, + :umbrella, + :poncho, + :livebook + ] + + @wdd_layers [ + :data, + :functions, + :tests, + :boundaries, + :lifecycles, + :workers + ] + + defstruct [ + :project_name, + :project_type, + :root_path, + :layer_paths, + :framework_considerations, + :naming_conventions, + :discovered_patterns + ] + + @type t :: %__MODULE__{ + project_name: String.t(), + project_type: atom(), + root_path: String.t(), + layer_paths: %{atom() => String.t()}, + framework_considerations: [String.t()], + naming_conventions: %{atom() => String.t()}, + discovered_patterns: [String.t()] + } + + @doc """ + Starts an interactive mapping session to discover project structure. + """ + @spec discover_project_structure(String.t()) :: {:ok, t()} | {:error, String.t()} + def discover_project_structure(project_path) do + with {:ok, project_info} <- scan_project_structure(project_path), + {:ok, project_type} <- determine_project_type(project_info), + {:ok, layer_mapping} <- conduct_interactive_mapping(project_type, project_info) do + + project_map = %__MODULE__{ + project_name: extract_project_name(project_path), + project_type: project_type, + root_path: project_path, + layer_paths: layer_mapping.layer_paths, + framework_considerations: layer_mapping.framework_considerations, + naming_conventions: layer_mapping.naming_conventions, + discovered_patterns: project_info.discovered_patterns + } + + {:ok, project_map} + end + end + + @doc """ + Scans the current project structure to identify existing patterns. 
+ """ + def scan_project_structure(project_path) do + patterns = %{ + has_mix_exs: File.exists?(Path.join(project_path, "mix.exs")), + has_lib_dir: File.dir?(Path.join(project_path, "lib")), + has_test_dir: File.dir?(Path.join(project_path, "test")), + has_phoenix: detect_phoenix_project(project_path), + has_otp_app: detect_otp_application(project_path), + lib_structure: scan_lib_directory(project_path), + test_structure: scan_test_directory(project_path), + existing_modules: discover_existing_modules(project_path) + } + + discovered_patterns = analyze_existing_patterns(patterns) + + {:ok, %{ + patterns: patterns, + discovered_patterns: discovered_patterns + }} + end + + @doc """ + Determines the project type based on scanned information. + """ + def determine_project_type(project_info) do + cond do + project_info.patterns.has_phoenix and has_web_features?(project_info) -> + {:ok, :phoenix_web} + + project_info.patterns.has_phoenix -> + {:ok, :phoenix_api} + + project_info.patterns.has_otp_app and has_supervision_tree?(project_info) -> + {:ok, :otp_application} + + is_library_project?(project_info) -> + {:ok, :library} + + has_umbrella_structure?(project_info) -> + {:ok, :umbrella} + + true -> + {:ok, :otp_application} # Default fallback + end + end + + @doc """ + Conducts interactive session to map WDD layers to project structure. 
+ """ + def conduct_interactive_mapping(project_type, project_info) do + IO.puts("\n🐝 Worker-Bee WDD Project Structure Discovery") + IO.puts("=" |> String.duplicate(50)) + + IO.puts("\nProject Type Detected: #{format_project_type(project_type)}") + display_discovered_patterns(project_info.discovered_patterns) + + layer_paths = gather_layer_preferences(project_type, project_info) + naming_conventions = gather_naming_conventions() + framework_considerations = gather_framework_considerations(project_type) + + {:ok, %{ + layer_paths: layer_paths, + naming_conventions: naming_conventions, + framework_considerations: framework_considerations + }} + end + + @doc """ + Saves the project mapping to a configuration file. + """ + def save_project_map(project_map, output_path \\ ".wdd_project_map.yaml") do + yaml_content = generate_yaml_config(project_map) + + case File.write(output_path, yaml_content) do + :ok -> + {:ok, "Project map saved to #{output_path}"} + {:error, reason} -> + {:error, "Failed to save project map: #{reason}"} + end + end + + @doc """ + Loads an existing project mapping from configuration file. + """ + def load_project_map(config_path \\ ".wdd_project_map.yaml") do + case File.read(config_path) do + {:ok, content} -> parse_yaml_config(content) + {:error, :enoent} -> {:error, "No project map found. 
Run project discovery first."} + {:error, reason} -> {:error, "Failed to load project map: #{reason}"} + end + end + + # Private helper functions + + defp detect_phoenix_project(project_path) do + mix_exs = Path.join(project_path, "mix.exs") + + case File.read(mix_exs) do + {:ok, content} -> String.contains?(content, ":phoenix") + _ -> false + end + end + + defp detect_otp_application(project_path) do + app_file = Path.join([project_path, "lib", "**", "application.ex"]) + !Enum.empty?(Path.wildcard(app_file)) + end + + defp scan_lib_directory(project_path) do + lib_path = Path.join(project_path, "lib") + + if File.dir?(lib_path) do + lib_path + |> Path.join("**/*.ex") + |> Path.wildcard() + |> Enum.map(&Path.relative_to(&1, lib_path)) + |> analyze_lib_structure() + else + %{} + end + end + + defp scan_test_directory(project_path) do + test_path = Path.join(project_path, "test") + + if File.dir?(test_path) do + test_path + |> Path.join("**/*_test.exs") + |> Path.wildcard() + |> Enum.map(&Path.relative_to(&1, test_path)) + else + [] + end + end + + defp discover_existing_modules(project_path) do + lib_path = Path.join(project_path, "lib") + + if File.dir?(lib_path) do + lib_path + |> Path.join("**/*.ex") + |> Path.wildcard() + |> Enum.map(&extract_module_info/1) + |> Enum.reject(&is_nil/1) + else + [] + end + end + + defp analyze_existing_patterns(patterns) do + discovered = [] + + discovered = if patterns.has_phoenix, do: ["Phoenix framework detected"] ++ discovered, else: discovered + discovered = if patterns.has_otp_app, do: ["OTP application structure"] ++ discovered, else: discovered + discovered = if has_functional_core_pattern?(patterns), do: ["Functional core pattern found"] ++ discovered, else: discovered + discovered = if has_boundary_pattern?(patterns), do: ["Boundary layer pattern found"] ++ discovered, else: discovered + + discovered + end + + defp gather_layer_preferences(project_type, project_info) do + IO.puts("\n📂 WDD Layer Structure Configuration") 
+ IO.puts("Let's define where each WDD layer should live in your project.\n") + + suggested_paths = get_suggested_paths(project_type) + + Enum.reduce(@wdd_layers, %{}, fn layer, acc -> + suggestion = Map.get(suggested_paths, layer, "lib/#{layer}") + + IO.puts("#{format_layer_name(layer)} Layer:") + IO.puts(" Suggested: #{suggestion}") + + prompt = " Your choice (press Enter for suggestion): " + user_input = IO.gets(prompt) |> String.trim() + + chosen_path = if user_input == "", do: suggestion, else: user_input + + Map.put(acc, layer, chosen_path) + end) + end + + defp gather_naming_conventions do + IO.puts("\n🏷️ Naming Convention Preferences") + + %{ + module_prefix: get_user_preference("Module prefix (e.g., MyApp)", ""), + functional_core_suffix: get_user_preference("Functional core suffix", "Core"), + boundary_suffix: get_user_preference("Boundary module suffix", ""), + test_suffix: get_user_preference("Test module suffix", "Test") + } + end + + defp gather_framework_considerations(project_type) do + considerations = [] + + considerations = case project_type do + :phoenix_web -> ["Phoenix contexts as boundary layers", "LiveView components"] ++ considerations + :phoenix_api -> ["Phoenix contexts as boundary layers", "JSON API design"] ++ considerations + :otp_application -> ["GenServer supervision", "Application callbacks"] ++ considerations + :library -> ["Pure functional design", "No process machinery"] ++ considerations + _ -> considerations + end + + IO.puts("\n⚙️ Framework Considerations:") + Enum.each(considerations, fn consideration -> + IO.puts(" • #{consideration}") + end) + + considerations + end + + defp get_suggested_paths(:phoenix_web) do + %{ + data: "lib/my_app/types", + functions: "lib/my_app_web/functional_core", + tests: "test", + boundaries: "lib/my_app_web", + lifecycles: "lib/my_app/application.ex", + workers: "lib/my_app/workers" + } + end + + defp get_suggested_paths(:phoenix_api) do + %{ + data: "lib/my_app/types", + functions: 
"lib/my_app/functional_core", + tests: "test", + boundaries: "lib/my_app_web", + lifecycles: "lib/my_app/application.ex", + workers: "lib/my_app/workers" + } + end + + defp get_suggested_paths(:otp_application) do + %{ + data: "lib/my_app/types", + functions: "lib/my_app/core", + tests: "test", + boundaries: "lib/my_app/boundary", + lifecycles: "lib/my_app/application.ex", + workers: "lib/my_app/workers" + } + end + + defp get_suggested_paths(:library) do + %{ + data: "lib/my_lib/types", + functions: "lib/my_lib", + tests: "test", + boundaries: "lib/my_lib/api", + lifecycles: "N/A (library)", + workers: "N/A (library)" + } + end + + defp get_suggested_paths(_) do + %{ + data: "lib/data", + functions: "lib/core", + tests: "test", + boundaries: "lib/boundary", + lifecycles: "lib/application.ex", + workers: "lib/workers" + } + end + + defp get_user_preference(prompt, default) do + full_prompt = if default != "", do: "#{prompt} [#{default}]: ", else: "#{prompt}: " + user_input = IO.gets(full_prompt) |> String.trim() + + if user_input == "", do: default, else: user_input + end + + defp extract_project_name(project_path) do + project_path + |> Path.basename() + |> String.replace("-", "_") + end + + defp format_project_type(type) do + type + |> Atom.to_string() + |> String.replace("_", " ") + |> String.split() + |> Enum.map(&String.capitalize/1) + |> Enum.join(" ") + end + + defp format_layer_name(layer) do + layer + |> Atom.to_string() + |> String.capitalize() + end + + defp display_discovered_patterns(patterns) do + if not Enum.empty?(patterns) do + IO.puts("\n🔍 Discovered Patterns:") + Enum.each(patterns, fn pattern -> + IO.puts(" • #{pattern}") + end) + end + end + + defp generate_yaml_config(project_map) do + """ + # WDD Project Structure Map + # Generated by Worker-Bee Agent + + project_name: "#{project_map.project_name}" + project_type: #{project_map.project_type} + root_path: "#{project_map.root_path}" + + wdd_layers: + 
#{format_layer_paths_yaml(project_map.layer_paths)} + + naming_conventions: + #{format_naming_conventions_yaml(project_map.naming_conventions)} + + framework_considerations: + #{format_framework_considerations_yaml(project_map.framework_considerations)} + + discovered_patterns: + #{format_discovered_patterns_yaml(project_map.discovered_patterns)} + """ + end + + defp format_layer_paths_yaml(layer_paths) do + Enum.map(layer_paths, fn {layer, path} -> + " #{layer}: \"#{path}\"" + end) + |> Enum.join("\n") + end + + defp format_naming_conventions_yaml(naming_conventions) do + Enum.map(naming_conventions, fn {key, value} -> + " #{key}: \"#{value}\"" + end) + |> Enum.join("\n") + end + + defp format_framework_considerations_yaml(considerations) do + Enum.map(considerations, fn consideration -> + " - \"#{consideration}\"" + end) + |> Enum.join("\n") + end + + defp format_discovered_patterns_yaml(patterns) do + Enum.map(patterns, fn pattern -> + " - \"#{pattern}\"" + end) + |> Enum.join("\n") + end + + # Additional helper functions for pattern detection + defp has_web_features?(project_info) do + lib_files = project_info.patterns.lib_structure + + web_indicators = [ + "router.ex", + "endpoint.ex", + "controllers/", + "views/", + "templates/", + "live/" + ] + + Enum.any?(web_indicators, fn indicator -> + Enum.any?(Map.keys(lib_files), fn file -> + String.contains?(file, indicator) + end) + end) + end + + defp has_supervision_tree?(project_info) do + Enum.any?(project_info.patterns.existing_modules, fn module_info -> + String.contains?(module_info.content || "", "Supervisor") + end) + end + + defp is_library_project?(project_info) do + not project_info.patterns.has_otp_app and + not project_info.patterns.has_phoenix + end + + defp has_umbrella_structure?(project_info) do + File.dir?(Path.join(project_info.patterns.root_path || ".", "apps")) + end + + defp has_functional_core_pattern?(patterns) do + lib_files = patterns.lib_structure + + core_indicators = [ + "core/", + 
"functional_core/", + "business/" + ] + + Enum.any?(core_indicators, fn indicator -> + Enum.any?(Map.keys(lib_files), fn file -> + String.contains?(file, indicator) + end) + end) + end + + defp has_boundary_pattern?(patterns) do + lib_files = patterns.lib_structure + + boundary_indicators = [ + "boundary/", + "api/", + "web/", + "controllers/" + ] + + Enum.any?(boundary_indicators, fn indicator -> + Enum.any?(Map.keys(lib_files), fn file -> + String.contains?(file, indicator) + end) + end) + end + + defp analyze_lib_structure(file_paths) do + Enum.reduce(file_paths, %{}, fn file_path, acc -> + directory = Path.dirname(file_path) + files = Map.get(acc, directory, []) + Map.put(acc, directory, [file_path | files]) + end) + end + + defp extract_module_info(file_path) do + case File.read(file_path) do + {:ok, content} -> + %{ + path: file_path, + content: content, + module_name: extract_module_name(content) + } + _ -> + nil + end + end + + defp extract_module_name(content) do + case Regex.run(~r/defmodule\s+([\w\.]+)/, content) do + [_, module_name] -> module_name + _ -> nil + end + end + + defp parse_yaml_config(content) do + # Simple YAML parsing for basic project map structure + # In a real implementation, you'd use a YAML library + {:ok, %__MODULE__{}} + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/template_generator.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/template_generator.ex new file mode 100644 index 0000000..7352028 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/template_generator.ex @@ -0,0 +1,905 @@ +defmodule WorkerBee.TemplateGenerator do + @moduledoc """ + Code scaffolding and template generation for WDD-compliant modules. + + Generates Elixir modules following Worker-Bee Driven Design principles + based on the project's established structure and conventions. 
+ """ + + alias WorkerBee.ProjectMapper + + @template_types [ + :functional_core, + :boundary_genserver, + :boundary_api, + :data_struct, + :test_functional, + :test_boundary, + :worker_process, + :lifecycle_supervisor + ] + + defstruct [ + :project_map, + :template_type, + :module_name, + :target_path, + :template_vars, + :generated_content + ] + + @type t :: %__MODULE__{ + project_map: ProjectMapper.t(), + template_type: atom(), + module_name: String.t(), + target_path: String.t(), + template_vars: map(), + generated_content: String.t() + } + + @doc """ + Scaffolds a new WDD component with all related files. + """ + @spec scaffold_component(String.t(), String.t(), atom(), map()) :: {:ok, [String.t()]} | {:error, String.t()} + def scaffold_component(project_path, component_name, component_type, options \\ %{}) do + with {:ok, project_map} <- ProjectMapper.load_project_map(Path.join(project_path, ".wdd_project_map.yaml")), + {:ok, files} <- generate_component_files(component_name, component_type, project_map, options) do + + created_files = Enum.map(files, fn {file_path, content} -> + File.mkdir_p!(Path.dirname(file_path)) + File.write!(file_path, content) + file_path + end) + + {:ok, created_files} + end + end + + @doc """ + Generates a single template file. 
+ """ + @spec generate_template(atom(), String.t(), ProjectMapper.t(), map()) :: {:ok, t()} | {:error, String.t()} + def generate_template(template_type, module_name, project_map, vars \\ %{}) do + with {:ok, template_content} <- get_template_content(template_type), + {:ok, target_path} <- determine_target_path(template_type, module_name, project_map), + template_vars <- build_template_vars(module_name, project_map, vars), + generated_content <- render_template(template_content, template_vars) do + + generator_result = %__MODULE__{ + project_map: project_map, + template_type: template_type, + module_name: module_name, + target_path: target_path, + template_vars: template_vars, + generated_content: generated_content + } + + {:ok, generator_result} + end + end + + @doc """ + Lists available template types. + """ + def available_templates, do: @template_types + + @doc """ + Generates a complete functional core module. + """ + def generate_functional_core(module_name, project_map, options \\ %{}) do + template_vars = %{ + module_name: module_name, + module_prefix: get_module_prefix(project_map), + functions: Map.get(options, :functions, ["new/1", "update/2"]), + data_types: Map.get(options, :data_types, []), + with_specs: Map.get(options, :with_specs, true), + with_docs: Map.get(options, :with_docs, true) + } + + generate_template(:functional_core, module_name, project_map, template_vars) + end + + @doc """ + Generates a boundary layer GenServer. 
+ """ + def generate_boundary_genserver(module_name, project_map, options \\ %{}) do + template_vars = %{ + module_name: module_name, + module_prefix: get_module_prefix(project_map), + state_type: Map.get(options, :state_type, "map()"), + api_functions: Map.get(options, :api_functions, ["start_link/1", "get_state/1"]), + callbacks: Map.get(options, :callbacks, ["handle_call", "handle_cast"]), + with_registry: Map.get(options, :with_registry, false) + } + + generate_template(:boundary_genserver, module_name, project_map, template_vars) + end + + @doc """ + Generates a data structure module. + """ + def generate_data_struct(module_name, project_map, options \\ %{}) do + template_vars = %{ + module_name: module_name, + module_prefix: get_module_prefix(project_map), + fields: Map.get(options, :fields, []), + with_defaults: Map.get(options, :with_defaults, true), + with_typespec: Map.get(options, :with_typespec, true), + with_constructor: Map.get(options, :with_constructor, true) + } + + generate_template(:data_struct, module_name, project_map, template_vars) + end + + @doc """ + Generates test files for a given module. 
+ """ + def generate_tests(module_name, module_type, project_map, options \\ %{}) do + template_type = case module_type do + :functional_core -> :test_functional + :boundary -> :test_boundary + _ -> :test_functional + end + + template_vars = %{ + module_name: module_name, + module_prefix: get_module_prefix(project_map), + test_type: module_type, + with_describe_blocks: Map.get(options, :with_describe_blocks, true), + with_setup: Map.get(options, :with_setup, false), + test_functions: Map.get(options, :test_functions, []) + } + + generate_template(template_type, "#{module_name}Test", project_map, template_vars) + end + + # Private helper functions + + defp generate_component_files(component_name, component_type, project_map, options) do + files = case component_type do + :complete_wdd_component -> + [ + generate_data_struct("#{component_name}Data", project_map, options), + generate_functional_core("#{component_name}Core", project_map, options), + generate_boundary_genserver("#{component_name}Server", project_map, options), + generate_tests("#{component_name}Core", :functional_core, project_map, options), + generate_tests("#{component_name}Server", :boundary, project_map, options) + ] + + :functional_component -> + [ + generate_functional_core(component_name, project_map, options), + generate_tests(component_name, :functional_core, project_map, options) + ] + + :boundary_component -> + [ + generate_boundary_genserver(component_name, project_map, options), + generate_tests(component_name, :boundary, project_map, options) + ] + + _ -> + [generate_template(component_type, component_name, project_map, options)] + end + + # Resolve all file generation results + resolved_files = Enum.reduce(files, [], fn + {:ok, generator_result}, acc -> + [{generator_result.target_path, generator_result.generated_content} | acc] + + {:error, _reason}, acc -> + acc + end) + + {:ok, Enum.reverse(resolved_files)} + end + + defp get_template_content(template_type) do + case template_type do + 
:functional_core -> {:ok, functional_core_template()} + :boundary_genserver -> {:ok, boundary_genserver_template()} + :boundary_api -> {:ok, boundary_api_template()} + :data_struct -> {:ok, data_struct_template()} + :test_functional -> {:ok, test_functional_template()} + :test_boundary -> {:ok, test_boundary_template()} + :worker_process -> {:ok, worker_process_template()} + :lifecycle_supervisor -> {:ok, lifecycle_supervisor_template()} + _ -> {:error, "Unknown template type: #{template_type}"} + end + end + + defp determine_target_path(template_type, module_name, project_map) do + layer_paths = project_map.layer_paths + base_path = project_map.root_path + + relative_path = case template_type do + :functional_core -> + Path.join([Map.get(layer_paths, :functions, "lib/core"), "#{Macro.underscore(module_name)}.ex"]) + + :boundary_genserver -> + Path.join([Map.get(layer_paths, :boundaries, "lib/boundary"), "#{Macro.underscore(module_name)}.ex"]) + + :boundary_api -> + Path.join([Map.get(layer_paths, :boundaries, "lib/boundary"), "#{Macro.underscore(module_name)}.ex"]) + + :data_struct -> + Path.join([Map.get(layer_paths, :data, "lib/types"), "#{Macro.underscore(module_name)}.ex"]) + + :test_functional -> + Path.join([Map.get(layer_paths, :tests, "test"), "#{Macro.underscore(module_name)}_test.exs"]) + + :test_boundary -> + Path.join([Map.get(layer_paths, :tests, "test"), "#{Macro.underscore(module_name)}_test.exs"]) + + :worker_process -> + Path.join([Map.get(layer_paths, :workers, "lib/workers"), "#{Macro.underscore(module_name)}.ex"]) + + :lifecycle_supervisor -> + Path.join([Map.get(layer_paths, :lifecycles, "lib"), "#{Macro.underscore(module_name)}.ex"]) + end + + {:ok, Path.join(base_path, relative_path)} + end + + defp build_template_vars(module_name, project_map, additional_vars) do + base_vars = %{ + module_name: module_name, + module_prefix: get_module_prefix(project_map), + project_name: project_map.project_name, + underscore_name: 
Macro.underscore(module_name), + timestamp: DateTime.utc_now() |> DateTime.to_iso8601(), + author: "Generated by Worker-Bee Agent" + } + + Map.merge(base_vars, additional_vars) + end + + defp render_template(template_content, vars) do + Enum.reduce(vars, template_content, fn {key, value}, acc -> + placeholder = "{{#{key}}}" + String.replace(acc, placeholder, to_string(value)) + end) + end + + defp get_module_prefix(project_map) do + project_map.naming_conventions + |> Map.get(:module_prefix, "") + |> case do + "" -> Macro.camelize(project_map.project_name) + prefix -> prefix + end + end + + # Template definitions + + defp functional_core_template do + """ + defmodule {{module_prefix}}.{{module_name}} do + @moduledoc \"\"\" + Functional core module for {{module_name}}. + + This module contains pure business logic without side effects, + following Worker-Bee Driven Design principles. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + @type t :: %__MODULE__{} + + defstruct [] + + @doc \"\"\" + Creates a new {{underscore_name}}. + \"\"\" + @spec new(map()) :: {:ok, t()} | {:error, String.t()} + def new(attrs \\\\ %{}) do + # Implementation here + {:ok, %__MODULE__{}} + end + + @doc \"\"\" + Updates a {{underscore_name}} with new attributes. + \"\"\" + @spec update(t(), map()) :: {:ok, t()} | {:error, String.t()} + def update(%__MODULE__{} = {{underscore_name}}, attrs) do + # Implementation here + {:ok, {{underscore_name}}} + end + + @doc \"\"\" + Validates a {{underscore_name}}. + \"\"\" + @spec validate(t()) :: {:ok, t()} | {:error, String.t()} + def validate(%__MODULE__{} = {{underscore_name}}) do + # Validation logic here + {:ok, {{underscore_name}}} + end + + # Private helper functions + + defp do_something(data) do + # Pure function implementation + data + end + end + """ + end + + defp boundary_genserver_template do + """ + defmodule {{module_prefix}}.{{module_name}} do + @moduledoc \"\"\" + Boundary layer GenServer for {{module_name}}. 
+ + This module manages state and side effects while delegating + business logic to the functional core. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + use GenServer + + alias {{module_prefix}}.{{module_name}}Core + + @type state :: map() + + # Client API + + @doc \"\"\" + Starts the {{module_name}} server. + \"\"\" + @spec start_link(keyword()) :: GenServer.on_start() + def start_link(opts \\\\ []) do + name = Keyword.get(opts, :name, __MODULE__) + GenServer.start_link(__MODULE__, opts, name: name) + end + + @doc \"\"\" + Gets the current state. + \"\"\" + @spec get_state(GenServer.server()) :: state() + def get_state(server \\\\ __MODULE__) do + GenServer.call(server, :get_state) + end + + @doc \"\"\" + Performs an operation on the {{underscore_name}}. + \"\"\" + @spec perform_operation(GenServer.server(), term()) :: {:ok, term()} | {:error, String.t()} + def perform_operation(server \\\\ __MODULE__, params) do + GenServer.call(server, {:perform_operation, params}) + end + + # Server Callbacks + + @impl true + def init(opts) do + initial_state = %{ + # Initialize state here + } + + {:ok, initial_state} + end + + @impl true + def handle_call(:get_state, _from, state) do + {:reply, state, state} + end + + @impl true + def handle_call({:perform_operation, params}, _from, state) do + with {:ok, result} <- {{module_name}}Core.perform_operation(params) do + new_state = update_state(state, result) + {:reply, {:ok, result}, new_state} + else + {:error, reason} -> + {:reply, {:error, reason}, state} + end + end + + @impl true + def handle_cast({:async_operation, params}, state) do + # Handle async operations + {:noreply, state} + end + + @impl true + def handle_info(msg, state) do + # Handle info messages + {:noreply, state} + end + + # Private helper functions + + defp update_state(state, _result) do + # State update logic + state + end + end + """ + end + + defp boundary_api_template do + """ + defmodule {{module_prefix}}.{{module_name}} do + @moduledoc 
\"\"\" + API boundary for {{module_name}}. + + This module provides a clean API interface that handles + validation and delegates to the functional core. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + alias {{module_prefix}}.{{module_name}}Core + + @doc \"\"\" + Creates a new {{underscore_name}}. + \"\"\" + @spec create(map()) :: {:ok, term()} | {:error, String.t()} + def create(attrs) do + with {:ok, validated_attrs} <- validate_attrs(attrs), + {:ok, result} <- {{module_name}}Core.create(validated_attrs) do + {:ok, result} + end + end + + @doc \"\"\" + Updates an existing {{underscore_name}}. + \"\"\" + @spec update(String.t(), map()) :: {:ok, term()} | {:error, String.t()} + def update(id, attrs) do + with {:ok, validated_id} <- validate_id(id), + {:ok, validated_attrs} <- validate_attrs(attrs), + {:ok, result} <- {{module_name}}Core.update(validated_id, validated_attrs) do + {:ok, result} + end + end + + @doc \"\"\" + Retrieves a {{underscore_name}} by ID. + \"\"\" + @spec get(String.t()) :: {:ok, term()} | {:error, String.t()} + def get(id) do + with {:ok, validated_id} <- validate_id(id) do + {{module_name}}Core.get(validated_id) + end + end + + # Private validation functions + + defp validate_attrs(attrs) when is_map(attrs) do + # Validation logic here + {:ok, attrs} + end + + defp validate_attrs(_), do: {:error, "Invalid attributes format"} + + defp validate_id(id) when is_binary(id) and id != "" do + {:ok, id} + end + + defp validate_id(_), do: {:error, "Invalid ID format"} + end + """ + end + + defp data_struct_template do + """ + defmodule {{module_prefix}}.{{module_name}} do + @moduledoc \"\"\" + Data structure for {{module_name}}. + + This module defines the core data structure and related + functions following Worker-Bee Driven Design principles. 
+ + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + @type t :: %__MODULE__{ + id: String.t() | nil, + name: String.t(), + created_at: DateTime.t(), + updated_at: DateTime.t() + } + + defstruct [ + :id, + :name, + created_at: nil, + updated_at: nil + ] + + @doc \"\"\" + Creates a new {{underscore_name}} struct. + \"\"\" + @spec new(map()) :: t() + def new(attrs \\\\ %{}) do + now = DateTime.utc_now() + + %__MODULE__{ + id: Map.get(attrs, :id), + name: Map.get(attrs, :name, ""), + created_at: Map.get(attrs, :created_at, now), + updated_at: Map.get(attrs, :updated_at, now) + } + end + + @doc \"\"\" + Updates a {{underscore_name}} struct with new attributes. + \"\"\" + @spec update(t(), map()) :: t() + def update(%__MODULE__{} = {{underscore_name}}, attrs) do + updated_attrs = Map.put(attrs, :updated_at, DateTime.utc_now()) + struct({{underscore_name}}, updated_attrs) + end + + @doc \"\"\" + Validates a {{underscore_name}} struct. + \"\"\" + @spec valid?(t()) :: boolean() + def valid?(%__MODULE__{name: name}) when is_binary(name) and name != "" do + true + end + + def valid?(_), do: false + end + """ + end + + defp test_functional_template do + """ + defmodule {{module_prefix}}.{{module_name}}Test do + @moduledoc \"\"\" + Tests for {{module_name}} functional core. + + These tests focus on behavior and business logic validation + without side effects or process machinery. 
+ + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + use ExUnit.Case, async: true + + alias {{module_prefix}}.{{module_name}} + + describe "{{underscore_name}}/0" do + test "creates a new {{underscore_name}} with default values" do + result = {{module_name}}.new() + + assert {:ok, {{underscore_name}}} = result + assert %{{module_name}}{} = {{underscore_name}} + end + + test "creates a new {{underscore_name}} with provided attributes" do + attrs = %{name: "test {{underscore_name}}"} + + result = {{module_name}}.new(attrs) + + assert {:ok, {{underscore_name}}} = result + assert {{underscore_name}}.name == "test {{underscore_name}}" + end + end + + describe "update/2" do + test "updates {{underscore_name}} with new attributes" do + {:ok, {{underscore_name}}} = {{module_name}}.new(%{name: "original"}) + + result = {{module_name}}.update({{underscore_name}}, %{name: "updated"}) + + assert {:ok, updated_{{underscore_name}}} = result + assert updated_{{underscore_name}}.name == "updated" + end + + test "returns error for invalid attributes" do + {:ok, {{underscore_name}}} = {{module_name}}.new() + + result = {{module_name}}.update({{underscore_name}}, %{invalid: "attr"}) + + assert {:error, _reason} = result + end + end + + describe "validate/1" do + test "validates a valid {{underscore_name}}" do + {:ok, {{underscore_name}}} = {{module_name}}.new(%{name: "valid"}) + + result = {{module_name}}.validate({{underscore_name}}) + + assert {:ok, ^{{underscore_name}}} = result + end + + test "returns error for invalid {{underscore_name}}" do + {:ok, {{underscore_name}}} = {{module_name}}.new(%{name: ""}) + + result = {{module_name}}.validate({{underscore_name}}) + + assert {:error, _reason} = result + end + end + + # Helper functions for test data + + defp valid_{{underscore_name}}_attrs do + %{ + name: "Test {{module_name}}" + } + end + + defp invalid_{{underscore_name}}_attrs do + %{ + name: "" + } + end + end + """ + end + + defp test_boundary_template do + """ + 
defmodule {{module_prefix}}.{{module_name}}Test do + @moduledoc \"\"\" + Integration tests for {{module_name}} boundary layer. + + These tests exercise the process behavior and API + interactions of the boundary layer. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + use ExUnit.Case, async: true + + alias {{module_prefix}}.{{module_name}} + + setup do + {:ok, pid} = {{module_name}}.start_link() + %{server: pid} + end + + describe "start_link/1" do + test "starts the server successfully" do + assert {:ok, pid} = {{module_name}}.start_link() + assert Process.alive?(pid) + end + + test "can start named server" do + assert {:ok, _pid} = {{module_name}}.start_link(name: :test_server) + assert Process.whereis(:test_server) + end + end + + describe "get_state/1" do + test "returns current server state", %{server: server} do + state = {{module_name}}.get_state(server) + + assert is_map(state) + end + end + + describe "perform_operation/2" do + test "performs operation successfully", %{server: server} do + params = %{action: "test"} + + result = {{module_name}}.perform_operation(server, params) + + assert {:ok, _result} = result + end + + test "handles invalid parameters", %{server: server} do + invalid_params = %{invalid: "params"} + + result = {{module_name}}.perform_operation(server, invalid_params) + + assert {:error, _reason} = result + end + + test "maintains state consistency", %{server: server} do + initial_state = {{module_name}}.get_state(server) + + {{module_name}}.perform_operation(server, %{action: "test"}) + + final_state = {{module_name}}.get_state(server) + + # Assert state changes as expected + refute initial_state == final_state + end + end + + # Helper functions for test data + + defp valid_operation_params do + %{ + action: "test_action", + data: %{key: "value"} + } + end + + defp invalid_operation_params do + %{ + invalid: "parameters" + } + end + end + """ + end + + defp worker_process_template do + """ + defmodule 
{{module_prefix}}.{{module_name}} do + @moduledoc \"\"\" + Worker process for {{module_name}}. + + This module handles concurrent work and background processing + following Worker-Bee Driven Design principles. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + use GenServer + + alias {{module_prefix}}.{{module_name}}Core + + @type state :: %{ + queue: [term()], + processing: boolean(), + results: [term()] + } + + # Client API + + @doc \"\"\" + Starts the worker process. + \"\"\" + @spec start_link(keyword()) :: GenServer.on_start() + def start_link(opts \\\\ []) do + name = Keyword.get(opts, :name, __MODULE__) + GenServer.start_link(__MODULE__, opts, name: name) + end + + @doc \"\"\" + Adds work to the queue. + \"\"\" + @spec add_work(GenServer.server(), term()) :: :ok + def add_work(server \\\\ __MODULE__, work_item) do + GenServer.cast(server, {:add_work, work_item}) + end + + @doc \"\"\" + Gets the current status of the worker. + \"\"\" + @spec get_status(GenServer.server()) :: map() + def get_status(server \\\\ __MODULE__) do + GenServer.call(server, :get_status) + end + + # Server Callbacks + + @impl true + def init(_opts) do + state = %{ + queue: [], + processing: false, + results: [] + } + + {:ok, state} + end + + @impl true + def handle_call(:get_status, _from, state) do + status = %{ + queue_length: length(state.queue), + processing: state.processing, + results_count: length(state.results) + } + + {:reply, status, state} + end + + @impl true + def handle_cast({:add_work, work_item}, state) do + new_queue = state.queue ++ [work_item] + new_state = %{state | queue: new_queue} + + # Start processing if not already processing + if not state.processing do + send(self(), :process_next) + end + + {:noreply, new_state} + end + + @impl true + def handle_info(:process_next, %{queue: []} = state) do + # No work to process + {:noreply, %{state | processing: false}} + end + + @impl true + def handle_info(:process_next, %{queue: [work_item | rest]} = state) 
do + # Process work item using functional core + result = {{module_name}}Core.process_work(work_item) + + new_state = %{ + state | + queue: rest, + processing: true, + results: [result | state.results] + } + + # Continue processing if more work exists + if not Enum.empty?(rest) do + send(self(), :process_next) + else + new_state = %{new_state | processing: false} + end + + {:noreply, new_state} + end + end + """ + end + + defp lifecycle_supervisor_template do + """ + defmodule {{module_prefix}}.{{module_name}} do + @moduledoc \"\"\" + Supervisor for {{module_name}} lifecycle management. + + This module manages the lifecycle of child processes + following OTP supervision principles. + + Generated by Worker-Bee Agent on {{timestamp}} + \"\"\" + + use Supervisor + + @doc \"\"\" + Starts the supervisor. + \"\"\" + @spec start_link(keyword()) :: Supervisor.on_start() + def start_link(opts \\\\ []) do + name = Keyword.get(opts, :name, __MODULE__) + Supervisor.start_link(__MODULE__, opts, name: name) + end + + @impl true + def init(_opts) do + children = [ + # Define child processes here + # {{{module_prefix}}.SomeServer, []}, + # {{{module_prefix}}.SomeWorker, []} + ] + + opts = [strategy: :one_for_one, name: __MODULE__] + Supervisor.init(children, opts) + end + + @doc \"\"\" + Dynamically starts a child process. + \"\"\" + @spec start_child(module(), term()) :: Supervisor.on_start_child() + def start_child(module, args) do + child_spec = {module, args} + Supervisor.start_child(__MODULE__, child_spec) + end + + @doc \"\"\" + Stops a child process. + \"\"\" + @spec stop_child(pid()) :: :ok | {:error, term()} + def stop_child(pid) when is_pid(pid) do + Supervisor.terminate_child(__MODULE__, pid) + end + + @doc \"\"\" + Lists all child processes. 
+ \"\"\" + @spec list_children() :: [Supervisor.child()] + def list_children do + Supervisor.which_children(__MODULE__) + end + end + """ + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/lib/wdd_validator.ex b/intent/plugins/claude/subagents/worker-bee/resources/lib/wdd_validator.ex new file mode 100644 index 0000000..5f9468a --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/lib/wdd_validator.ex @@ -0,0 +1,906 @@ +defmodule WorkerBee.WddValidator do + @moduledoc """ + WDD compliance validation engine. + + Validates Elixir code against Worker-Bee Driven Design principles + based on the project's established WDD layer mapping. + """ + + alias WorkerBee.ProjectMapper + + defstruct [ + :project_map, + :validation_results, + :compliance_score, + :violations, + :recommendations + ] + + @type validation_result :: %{ + file_path: String.t(), + layer: atom(), + violations: [violation()], + score: float(), + recommendations: [String.t()] + } + + @type violation :: %{ + type: atom(), + severity: :error | :warning | :info, + line: integer() | nil, + message: String.t(), + rule: String.t() + } + + @type t :: %__MODULE__{ + project_map: ProjectMapper.t(), + validation_results: [validation_result()], + compliance_score: float(), + violations: [violation()], + recommendations: [String.t()] + } + + @doc """ + Validates the entire project against WDD compliance. 
+ """ + @spec validate_project(String.t()) :: {:ok, t()} | {:error, String.t()} + def validate_project(project_path) do + with {:ok, project_map} <- ProjectMapper.load_project_map(Path.join(project_path, ".wdd_project_map.yaml")), + {:ok, file_list} <- get_project_files(project_path), + validation_results <- validate_files(file_list, project_map), + structure_analysis <- analyze_project_structure_changes(file_list, project_map) do + + all_recommendations = generate_recommendations(validation_results) ++ structure_analysis.recommendations + + validator_result = %__MODULE__{ + project_map: project_map, + validation_results: validation_results, + compliance_score: calculate_compliance_score(validation_results), + violations: extract_violations(validation_results) ++ structure_analysis.violations, + recommendations: all_recommendations + } + + {:ok, validator_result} + end + end + + @doc """ + Validates a single file against WDD principles. + """ + @spec validate_file(String.t(), ProjectMapper.t()) :: validation_result() + def validate_file(file_path, project_map) do + layer = determine_file_layer(file_path, project_map) + + case File.read(file_path) do + {:ok, content} -> + violations = validate_content(content, layer, file_path) + + %{ + file_path: file_path, + layer: layer, + violations: violations, + score: calculate_file_score(violations), + recommendations: generate_file_recommendations(violations, layer) + } + + {:error, reason} -> + %{ + file_path: file_path, + layer: :unknown, + violations: [%{ + type: :file_read_error, + severity: :error, + line: nil, + message: "Cannot read file: #{reason}", + rule: "file_accessibility" + }], + score: 0.0, + recommendations: ["Ensure file is accessible and readable"] + } + end + end + + @doc """ + Validates content against functional core principles. 
+ """ + def validate_functional_core(content, file_path) do + violations = [] + + violations = violations ++ check_side_effects(content, file_path) + violations = violations ++ check_function_purity(content, file_path) + violations = violations ++ check_composition_patterns(content, file_path) + violations = violations ++ check_abstraction_levels(content, file_path) + violations = violations ++ check_pattern_matching_usage(content, file_path) + + violations + end + + @doc """ + Validates content against boundary layer principles. + """ + def validate_boundary_layer(content, file_path) do + violations = [] + + violations = violations ++ check_genserver_patterns(content, file_path) + violations = violations ++ check_error_handling(content, file_path) + violations = violations ++ check_api_design(content, file_path) + violations = violations ++ check_state_management(content, file_path) + violations = violations ++ check_validation_placement(content, file_path) + + violations + end + + @doc """ + Validates content against data layer principles. + """ + def validate_data_layer(content, file_path) do + violations = [] + + violations = violations ++ check_struct_definitions(content, file_path) + violations = violations ++ check_data_immutability(content, file_path) + violations = violations ++ check_data_structure_choice(content, file_path) + violations = violations ++ check_access_patterns(content, file_path) + + violations + end + + @doc """ + Validates content against testing principles. 
+ """ + def validate_test_layer(content, file_path) do + violations = [] + + violations = violations ++ check_test_organization(content, file_path) + violations = violations ++ check_test_behavior_focus(content, file_path) + violations = violations ++ check_test_naming(content, file_path) + violations = violations ++ check_setup_patterns(content, file_path) + violations = violations ++ check_assertion_quality(content, file_path) + + violations + end + + @doc """ + Analyzes project structure to detect when re-mapping might be needed. + """ + def analyze_project_structure_changes(file_list, project_map) do + violations = [] + recommendations = [] + + # Find files outside mapped directories + unmapped_files = find_unmapped_files(file_list, project_map) + + # Check for new directories that could be WDD layers + new_directories = find_potential_new_layer_directories(file_list, project_map) + + # Check if project type indicators have changed + type_changes = detect_project_type_changes(file_list, project_map) + + cond do + length(unmapped_files) > 5 -> + recommendations = [ + "Found #{length(unmapped_files)} files outside mapped WDD layers. Consider running 'mix wdd.remap' to update your project structure mapping." + ] ++ recommendations + + violations = [ + create_structure_violation(:unmapped_files, :info, + "Multiple files found outside WDD layer mapping", + "project_structure_drift") + ] ++ violations + + length(unmapped_files) > 0 -> + recommendations = [ + "Found #{length(unmapped_files)} files outside mapped directories. You may want to update your WDD layer mapping." + ] ++ recommendations + + length(new_directories) > 0 -> + new_dir_names = Enum.map(new_directories, &Path.basename/1) |> Enum.join(", ") + recommendations = [ + "Detected new directories (#{new_dir_names}) that could be WDD layers. Consider re-mapping if your project structure has evolved." + ] ++ recommendations + + type_changes.has_changes? 
-> + recommendations = [ + "Project type indicators suggest structural changes (#{type_changes.change_description}). Consider re-mapping your WDD layers." + ] ++ recommendations + + true -> + recommendations + end + + %{ + violations: violations, + recommendations: recommendations, + unmapped_files: unmapped_files, + new_directories: new_directories, + type_changes: type_changes + } + end + + # Private helper functions + + defp get_project_files(project_path) do + elixir_files = + project_path + |> Path.join("**/*.{ex,exs}") + |> Path.wildcard() + |> Enum.reject(&String.contains?(&1, "/_build/")) + |> Enum.reject(&String.contains?(&1, "/deps/")) + + {:ok, elixir_files} + end + + defp validate_files(file_list, project_map) do + Enum.map(file_list, fn file_path -> + validate_file(file_path, project_map) + end) + end + + defp determine_file_layer(file_path, project_map) do + layer_paths = project_map.layer_paths + + cond do + path_matches?(file_path, Map.get(layer_paths, :data)) -> :data + path_matches?(file_path, Map.get(layer_paths, :functions)) -> :functions + path_matches?(file_path, Map.get(layer_paths, :boundaries)) -> :boundaries + path_matches?(file_path, Map.get(layer_paths, :workers)) -> :workers + path_matches?(file_path, Map.get(layer_paths, :lifecycles)) -> :lifecycles + String.contains?(file_path, "test/") -> :tests + true -> :unknown + end + end + + defp path_matches?(file_path, layer_path) when is_binary(layer_path) do + String.contains?(file_path, layer_path) + end + + defp path_matches?(_, _), do: false + + defp validate_content(content, layer, file_path) do + case layer do + :functions -> validate_functional_core(content, file_path) + :boundaries -> validate_boundary_layer(content, file_path) + :data -> validate_data_layer(content, file_path) + :tests -> validate_test_layer(content, file_path) + :workers -> validate_boundary_layer(content, file_path) # Workers use boundary patterns + :lifecycles -> validate_lifecycle_layer(content, file_path) + _ -> 
[] + end + end + + defp validate_lifecycle_layer(content, file_path) do + violations = [] + + violations = violations ++ check_supervision_patterns(content, file_path) + violations = violations ++ check_application_structure(content, file_path) + violations = violations ++ check_child_specs(content, file_path) + + violations + end + + # Functional Core Validation Rules + + defp check_side_effects(content, file_path) do + violations = [] + + # Check for direct GenServer calls in functional core + if Regex.match?(~r/GenServer\.(call|cast|start|start_link)/, content) do + violations = [create_violation(:side_effect, :error, nil, + "Functional core should not contain GenServer calls", + "functional_core_purity", file_path) | violations] + end + + # Check for direct process spawning + if Regex.match?(~r/spawn(_link)?/, content) do + violations = [create_violation(:side_effect, :error, nil, + "Functional core should not spawn processes", + "functional_core_purity", file_path) | violations] + end + + # Check for file I/O operations + if Regex.match?(~r/File\.(read|write|open)/, content) do + violations = [create_violation(:side_effect, :error, nil, + "Functional core should not perform file I/O", + "functional_core_purity", file_path) | violations] + end + + # Check for logging + if Regex.match?(~r/Logger\.(info|debug|warn|error)/, content) do + violations = [create_violation(:side_effect, :warning, nil, + "Consider moving logging to boundary layer", + "functional_core_purity", file_path) | violations] + end + + violations + end + + defp check_function_purity(content, file_path) do + violations = [] + + # Check for functions that don't return anything (side effect only) + if Regex.match?(~r/def \w+.*do\s*\n.*\n\s*end\s*$/m, content) and + not Regex.match?(~r/def \w+.*do\s*\n.*return|{:|\.|\w+\s*\n\s*end/m, content) do + violations = [create_violation(:impure_function, :warning, nil, + "Functions should return values rather than just performing side effects", + 
"function_purity", file_path) | violations] + end + + violations + end + + defp check_composition_patterns(content, file_path) do + violations = [] + + # Check for proper pipe usage + lines = String.split(content, "\n") + Enum.with_index(lines) + |> Enum.each(fn {line, index} -> + if String.contains?(line, "|>") and String.contains?(line, "(") do + if not Regex.match?(~r/\|>\s*\w+\(/, line) do + violations = [create_violation(:poor_composition, :info, index + 1, + "Consider using pipe-friendly function design", + "composition_patterns", file_path) | violations] + end + end + end) + + violations + end + + defp check_abstraction_levels(content, file_path) do + violations = [] + + # Check for mixed abstraction levels in functions + # This is a simplified check - real implementation would be more sophisticated + functions = extract_functions(content) + + Enum.each(functions, fn {function_name, function_content, line_num} -> + if has_mixed_abstraction_levels?(function_content) do + violations = [create_violation(:mixed_abstraction, :warning, line_num, + "Function '#{function_name}' mixes different abstraction levels", + "single_abstraction_level", file_path) | violations] + end + end) + + violations + end + + defp check_pattern_matching_usage(content, file_path) do + violations = [] + + # Check for if/else when pattern matching could be used + if Regex.match?(~r/if\s+.*\s+do.*else.*end/s, content) and + not Regex.match?(~r/case\s+.*\s+do/, content) do + violations = [create_violation(:poor_pattern_matching, :info, nil, + "Consider using pattern matching instead of if/else", + "pattern_matching_preference", file_path) | violations] + end + + violations + end + + # Boundary Layer Validation Rules + + defp check_genserver_patterns(content, file_path) do + violations = [] + + if String.contains?(content, "use GenServer") do + # Check for proper GenServer structure + if not Regex.match?(~r/def handle_call/, content) and + not Regex.match?(~r/def handle_cast/, content) do + 
violations = [create_violation(:incomplete_genserver, :warning, nil, + "GenServer should implement handle_call or handle_cast", + "genserver_completeness", file_path) | violations] + end + + # Check for proper init function + if not Regex.match?(~r/def init/, content) do + violations = [create_violation(:missing_init, :error, nil, + "GenServer must implement init/1 function", + "genserver_structure", file_path) | violations] + end + end + + violations + end + + defp check_error_handling(content, file_path) do + violations = [] + + # Check for with statements in boundary layer + if not Regex.match?(~r/with\s+.*<-/, content) and + Regex.match?(~r/def \w+.*do/, content) do + violations = [create_violation(:missing_error_handling, :info, nil, + "Consider using 'with' statements for error composition", + "railway_oriented_programming", file_path) | violations] + end + + # Check for proper tagged tuple returns + if Regex.match?(~r/def \w+.*do/, content) and + not Regex.match?(~r/\{:ok,|{:error,/, content) do + violations = [create_violation(:untagged_returns, :warning, nil, + "Consider returning tagged tuples {:ok, result} or {:error, reason}", + "tagged_tuple_returns", file_path) | violations] + end + + violations + end + + defp check_api_design(content, file_path) do + violations = [] + + # Check for public functions without @spec + functions = extract_public_functions(content) + + Enum.each(functions, fn {function_name, _, line_num} -> + if not has_spec_for_function?(content, function_name) do + violations = [create_violation(:missing_spec, :info, line_num, + "Public function '#{function_name}' should have @spec", + "api_documentation", file_path) | violations] + end + end) + + violations + end + + defp check_state_management(content, file_path) do + violations = [] + + # Check for state mutations in functional core calls + if String.contains?(content, "use GenServer") and + Regex.match?(~r/def handle_.*\(.*state.*\)/s, content) do + if not 
Regex.match?(~r/\{:reply,.*new_state\}|\{:noreply,.*new_state\}/s, content) do + violations = [create_violation(:improper_state_management, :warning, nil, + "GenServer callbacks should return proper state transitions", + "state_management", file_path) | violations] + end + end + + violations + end + + defp check_validation_placement(content, file_path) do + violations = [] + + # This would check if validations are properly placed at boundary + # Implementation would be project-specific + + violations + end + + # Data Layer Validation Rules + + defp check_struct_definitions(content, file_path) do + violations = [] + + if Regex.match?(~r/defstruct/, content) do + # Check for default values + if not Regex.match?(~r/defstruct.*:.*,/, content) do + violations = [create_violation(:struct_without_defaults, :info, nil, + "Consider providing default values in struct definition", + "struct_best_practices", file_path) | violations] + end + end + + violations + end + + defp check_data_immutability(content, file_path) do + violations = [] + + # Check for mutating operations (this is simplified) + if Regex.match?(~r/Map\.put\(.*,.*,.*\)/, content) and + String.contains?(content, "defstruct") do + violations = [create_violation(:data_mutation, :info, nil, + "Consider using struct update syntax: %{struct | field: value}", + "immutability_patterns", file_path) | violations] + end + + violations + end + + defp check_data_structure_choice(content, file_path) do + violations = [] + + # Check for deeply nested maps + if Regex.match?(~r/%\{.*%\{.*%\{/, content) do + violations = [create_violation(:deep_nesting, :warning, nil, + "Deeply nested maps are hard to work with. 
Consider flattening or using structs", + "data_structure_design", file_path) | violations] + end + + violations + end + + defp check_access_patterns(content, file_path) do + violations = [] + + # This would analyze if access patterns match data structure choices + # Implementation would be more sophisticated in practice + + violations + end + + # Test Layer Validation Rules + + defp check_test_organization(content, file_path) do + violations = [] + + if String.contains?(file_path, "_test.exs") do + # Check for describe blocks + if Regex.match?(~r/test\s+"/, content) and + not Regex.match?(~r/describe\s+"/, content) do + violations = [create_violation(:poor_test_organization, :info, nil, + "Consider using 'describe' blocks to organize related tests", + "test_organization", file_path) | violations] + end + end + + violations + end + + defp check_test_behavior_focus(content, file_path) do + violations = [] + + # Check if tests focus on behavior rather than implementation + if Regex.match?(~r/assert.*private_function/, content) do + violations = [create_violation(:testing_implementation, :warning, nil, + "Tests should focus on public behavior, not private implementation", + "behavior_testing", file_path) | violations] + end + + violations + end + + defp check_test_naming(content, file_path) do + violations = [] + + tests = extract_test_names(content) + + Enum.each(tests, fn {test_name, line_num} -> + if String.length(test_name) < 10 or not String.contains?(test_name, " ") do + violations = [create_violation(:poor_test_naming, :info, line_num, + "Test name '#{test_name}' should be more descriptive", + "test_naming", file_path) | violations] + end + end) + + violations + end + + defp check_setup_patterns(content, file_path) do + violations = [] + + # Check for repeated setup code + if Enum.count(String.split(content, "setup"), fn _ -> true end) > 3 and + not Regex.match?(~r/setup_all/, content) do + violations = [create_violation(:repeated_setup, :info, nil, + "Consider 
using setup_all or named setups to reduce duplication", + "test_setup", file_path) | violations] + end + + violations + end + + defp check_assertion_quality(content, file_path) do + violations = [] + + # Check for generic assertions + if Regex.match?(~r/assert\s+true/, content) or + Regex.match?(~r/assert\s+false/, content) do + violations = [create_violation(:generic_assertions, :warning, nil, + "Avoid generic assertions like 'assert true'. Use specific assertions", + "assertion_quality", file_path) | violations] + end + + violations + end + + # Lifecycle Layer Validation Rules + + defp check_supervision_patterns(content, file_path) do + violations = [] + + if String.contains?(content, "Supervisor") do + # Check for proper child specs + if not Regex.match?(~r/children\s*=/, content) do + violations = [create_violation(:missing_child_specs, :warning, nil, + "Supervisor should define children specifications", + "supervision_structure", file_path) | violations] + end + end + + violations + end + + defp check_application_structure(content, file_path) do + violations = [] + + if String.contains?(content, "use Application") do + # Check for proper start function + if not Regex.match?(~r/def start/, content) do + violations = [create_violation(:missing_start_function, :error, nil, + "Application must implement start/2 function", + "application_structure", file_path) | violations] + end + end + + violations + end + + defp check_child_specs(content, file_path) do + violations = [] + + # Check for proper child specification format + if Regex.match?(~r/children\s*=/, content) do + if not Regex.match?(~r/\{.*,.*\}|\w+\.child_spec/, content) do + violations = [create_violation(:improper_child_specs, :warning, nil, + "Child specifications should follow proper format", + "child_spec_format", file_path) | violations] + end + end + + violations + end + + # Helper functions for validation logic + + defp create_violation(type, severity, line, message, rule, file_path) do + %{ + 
type: type, + severity: severity, + line: line, + message: message, + rule: rule, + file: file_path + } + end + + defp extract_functions(content) do + # Simplified function extraction + content + |> String.split("\n") + |> Enum.with_index() + |> Enum.reduce([], fn {line, index}, acc -> + case Regex.run(~r/def\s+(\w+)/, line) do + [_, function_name] -> [{function_name, "", index + 1} | acc] + _ -> acc + end + end) + |> Enum.reverse() + end + + defp extract_public_functions(content) do + extract_functions(content) + |> Enum.reject(fn {name, _, _} -> String.starts_with?(name, "_") end) + end + + defp extract_test_names(content) do + content + |> String.split("\n") + |> Enum.with_index() + |> Enum.reduce([], fn {line, index}, acc -> + case Regex.run(~r/test\s+"([^"]+)"/, line) do + [_, test_name] -> [{test_name, index + 1} | acc] + _ -> acc + end + end) + |> Enum.reverse() + end + + defp has_spec_for_function?(content, function_name) do + Regex.match?(~r/@spec\s+#{function_name}/, content) + end + + defp has_mixed_abstraction_levels?(_function_content) do + # Simplified check - real implementation would analyze AST + false + end + + defp calculate_file_score(violations) do + total_points = 100.0 + + penalty = Enum.reduce(violations, 0.0, fn violation, acc -> + penalty_value = case violation.severity do + :error -> 10.0 + :warning -> 5.0 + :info -> 2.0 + end + acc + penalty_value + end) + + max(0.0, total_points - penalty) + end + + defp calculate_compliance_score(validation_results) do + if Enum.empty?(validation_results) do + 0.0 + else + total_score = Enum.reduce(validation_results, 0.0, fn result, acc -> + acc + result.score + end) + + total_score / length(validation_results) + end + end + + defp extract_violations(validation_results) do + Enum.flat_map(validation_results, fn result -> + result.violations + end) + end + + defp generate_recommendations(validation_results) do + violations = extract_violations(validation_results) + + violations + |> Enum.group_by(fn 
violation -> violation.type end) + |> Enum.map(fn {type, type_violations} -> + count = length(type_violations) + generate_recommendation_for_type(type, count) + end) + |> Enum.reject(&is_nil/1) + end + + defp generate_file_recommendations(violations, layer) do + recommendations = [] + + recommendations = if Enum.any?(violations, fn v -> v.type == :side_effect end) do + ["Move side effects to boundary layer"] ++ recommendations + else + recommendations + end + + recommendations = if layer == :functions and + Enum.any?(violations, fn v -> v.type == :poor_composition end) do + ["Improve function composition with pipes"] ++ recommendations + else + recommendations + end + + recommendations + end + + defp generate_recommendation_for_type(type, count) do + case type do + :side_effect -> + "Found #{count} side effect(s) in functional core. Move these to boundary layer." + :missing_spec -> + "#{count} public function(s) missing @spec. Add type specifications for better documentation." + :poor_test_naming -> + "#{count} test(s) have poor naming. Use descriptive test names that explain behavior." + :generic_assertions -> + "#{count} test(s) use generic assertions. Use specific assertions for better test clarity." 
+ _ -> + nil + end + end + + # Structure analysis helper functions + + defp find_unmapped_files(file_list, project_map) do + mapped_paths = Map.values(project_map.layer_paths) |> Enum.reject(&is_nil/1) + + Enum.filter(file_list, fn file_path -> + not is_file_in_mapped_layers?(file_path, mapped_paths) and + not is_standard_project_file?(file_path) + end) + end + + defp is_file_in_mapped_layers?(file_path, mapped_paths) do + Enum.any?(mapped_paths, fn layer_path -> + String.contains?(file_path, layer_path) + end) or String.contains?(file_path, "test/") + end + + defp is_standard_project_file?(file_path) do + standard_patterns = [ + "mix.exs", + "config/", + "_build/", + "deps/", + ".git/", + "README", + "LICENSE" + ] + + Enum.any?(standard_patterns, fn pattern -> + String.contains?(file_path, pattern) + end) + end + + defp find_potential_new_layer_directories(file_list, project_map) do + mapped_dirs = Map.values(project_map.layer_paths) + |> Enum.reject(&is_nil/1) + |> Enum.map(&Path.dirname/1) + |> MapSet.new() + + all_dirs = file_list + |> Enum.map(&Path.dirname/1) + |> Enum.uniq() + |> Enum.filter(fn dir -> + String.contains?(dir, "/lib/") and not MapSet.member?(mapped_dirs, dir) + end) + + # Look for directories with multiple Elixir files that could be new layers + Enum.filter(all_dirs, fn dir -> + files_in_dir = Enum.count(file_list, fn file -> String.starts_with?(file, dir) end) + files_in_dir >= 3 + end) + end + + defp detect_project_type_changes(file_list, project_map) do + current_indicators = detect_current_project_indicators(file_list) + + type_change_detected = case project_map.project_type do + :phoenix_web -> + not (current_indicators.has_phoenix_web? and current_indicators.has_web_files?) + :phoenix_api -> + not (current_indicators.has_phoenix? and current_indicators.has_api_files?) + :otp_application -> + not current_indicators.has_otp_patterns? + :library -> + current_indicators.has_application_patterns? 
+ _ -> + false + end + + change_description = if type_change_detected do + describe_type_changes(project_map.project_type, current_indicators) + else + "" + end + + %{ + has_changes?: type_change_detected, + change_description: change_description, + current_indicators: current_indicators + } + end + + defp detect_current_project_indicators(file_list) do + content_samples = file_list + |> Enum.take(10) + |> Enum.map(&safe_read_file/1) + |> Enum.join("\n") + + %{ + has_phoenix?: String.contains?(content_samples, "Phoenix."), + has_phoenix_web?: Enum.any?(file_list, &String.contains?(&1, "_web/")), + has_web_files?: Enum.any?(file_list, &String.contains?(&1, "router.ex")), + has_api_files?: Enum.any?(file_list, &String.contains?(&1, "api/")), + has_otp_patterns?: String.contains?(content_samples, "GenServer") or String.contains?(content_samples, "Supervisor"), + has_application_patterns?: String.contains?(content_samples, "use Application") + } + end + + defp safe_read_file(file_path) do + case File.read(file_path) do + {:ok, content} -> String.slice(content, 0, 1000) # Read first 1KB for analysis + {:error, _} -> "" + end + end + + defp describe_type_changes(original_type, current_indicators) do + case original_type do + :library when current_indicators.has_application_patterns? -> + "library evolved into OTP application" + :otp_application when current_indicators.has_phoenix? -> + "OTP application now includes Phoenix" + :phoenix_api when current_indicators.has_web_files? 
-> + "Phoenix API now includes web components" + _ -> + "project structure has evolved" + end + end + + defp create_structure_violation(type, severity, message, rule) do + %{ + type: type, + severity: severity, + line: nil, + message: message, + rule: rule, + file: "project_structure" + } + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/templates/boundary_genserver.ex.eex b/intent/plugins/claude/subagents/worker-bee/resources/templates/boundary_genserver.ex.eex new file mode 100644 index 0000000..d6d08a9 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/templates/boundary_genserver.ex.eex @@ -0,0 +1,63 @@ +defmodule <%= module_prefix %>.<%= module_name %> do + @moduledoc """ + Boundary layer GenServer for <%= module_name %>. + + This module manages state and side effects while delegating + business logic to the functional core. + + Generated by Worker-Bee Agent on <%= timestamp %> + """ + + use GenServer + + alias <%= module_prefix %>.<%= module_name %>Core + + @type state :: map() + + # Client API + + @doc """ + Starts the <%= module_name %> server. + """ + @spec start_link(keyword()) :: GenServer.on_start() + def start_link(opts \\ []) do + name = Keyword.get(opts, :name, __MODULE__) + GenServer.start_link(__MODULE__, opts, name: name) + end + + @doc """ + Gets the current state. 
+ """ + @spec get_state(GenServer.server()) :: state() + def get_state(server \\ __MODULE__) do + GenServer.call(server, :get_state) + end + + # Server Callbacks + + @impl true + def init(opts) do + initial_state = %{ + # Initialize state here + } + + {:ok, initial_state} + end + + @impl true + def handle_call(:get_state, _from, state) do + {:reply, state, state} + end + + @impl true + def handle_cast({:async_operation, params}, state) do + # Handle async operations + {:noreply, state} + end + + @impl true + def handle_info(msg, state) do + # Handle info messages + {:noreply, state} + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/templates/functional_core.ex.eex b/intent/plugins/claude/subagents/worker-bee/resources/templates/functional_core.ex.eex new file mode 100644 index 0000000..23f32db --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/templates/functional_core.ex.eex @@ -0,0 +1,48 @@ +defmodule <%= module_prefix %>.<%= module_name %> do + @moduledoc """ + Functional core module for <%= module_name %>. + + This module contains pure business logic without side effects, + following Worker-Bee Driven Design principles. + + Generated by Worker-Bee Agent on <%= timestamp %> + """ + + @type t :: %__MODULE__{} + + defstruct [] + + @doc """ + Creates a new <%= underscore_name %>. + """ + @spec new(map()) :: {:ok, t()} | {:error, String.t()} + def new(attrs \\ %{}) do + # Implementation here + {:ok, %__MODULE__{}} + end + + @doc """ + Updates a <%= underscore_name %> with new attributes. + """ + @spec update(t(), map()) :: {:ok, t()} | {:error, String.t()} + def update(%__MODULE__{} = <%= underscore_name %>, attrs) do + # Implementation here + {:ok, <%= underscore_name %>} + end + + @doc """ + Validates a <%= underscore_name %>. 
+ """ + @spec validate(t()) :: {:ok, t()} | {:error, String.t()} + def validate(%__MODULE__{} = <%= underscore_name %>) do + # Validation logic here + {:ok, <%= underscore_name %>} + end + + # Private helper functions + + defp do_something(data) do + # Pure function implementation + data + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/validation/boundary_rules.ex b/intent/plugins/claude/subagents/worker-bee/resources/validation/boundary_rules.ex new file mode 100644 index 0000000..523d4ae --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/validation/boundary_rules.ex @@ -0,0 +1,522 @@ +defmodule WorkerBee.Validation.BoundaryRules do + @moduledoc """ + Validation rules for boundary layer compliance. + + The boundary layer handles state, side effects, and provides clean APIs + while delegating business logic to the functional core. + """ + + @doc """ + Validates GenServer implementation patterns. + """ + def validate_genserver_patterns(content, file_path) do + violations = [] + + if String.contains?(content, "use GenServer") do + violations = violations ++ check_genserver_structure(content, file_path) + violations = violations ++ check_callback_implementation(content, file_path) + violations = violations ++ check_state_management(content, file_path) + violations = violations ++ check_api_separation(content, file_path) + end + + violations + end + + @doc """ + Validates error handling patterns. + """ + def validate_error_handling(content, file_path) do + violations = [] + + violations = violations ++ check_with_statements(content, file_path) + violations = violations ++ check_tagged_tuples(content, file_path) + violations = violations ++ check_error_propagation(content, file_path) + + violations + end + + @doc """ + Validates API design patterns. 
+ """ + def validate_api_design(content, file_path) do + violations = [] + + violations = violations ++ check_function_specs(content, file_path) + violations = violations ++ check_input_validation(content, file_path) + violations = violations ++ check_api_consistency(content, file_path) + violations = violations ++ check_backwards_compatibility(content, file_path) + + violations + end + + @doc """ + Validates separation of concerns. + """ + def validate_separation_of_concerns(content, file_path) do + violations = [] + + violations = violations ++ check_business_logic_delegation(content, file_path) + violations = violations ++ check_side_effect_isolation(content, file_path) + violations = violations ++ check_module_responsibilities(content, file_path) + + violations + end + + # Private validation functions + + defp check_genserver_structure(content, file_path) do + violations = [] + + # Check for required callbacks + required_callbacks = ["init"] + missing_callbacks = Enum.filter(required_callbacks, fn callback -> + not Regex.match?(~r/def #{callback}/, content) + end) + + violations = Enum.reduce(missing_callbacks, violations, fn callback, acc -> + [create_violation(:missing_callback, :error, nil, + "GenServer must implement #{callback}/1 callback", + "genserver_structure", file_path) | acc] + end) + + # Check for handle_* implementation + if not Regex.match?(~r/def handle_(call|cast|info)/, content) do + violations = [create_violation(:incomplete_genserver, :warning, nil, + "GenServer should implement at least one handle_* callback", + "genserver_completeness", file_path) | violations] + end + + violations + end + + defp check_callback_implementation(content, file_path) do + violations = [] + + # Check handle_call return patterns + handle_call_matches = Regex.scan(~r/def handle_call.*do(.*?)(?=def|\z)/s, content) + + Enum.reduce(handle_call_matches, violations, fn [_full_match, callback_body], acc -> + if not Regex.match?(~r/\{:reply,.*,.*\}/, callback_body) do + 
[create_violation(:improper_callback_return, :warning, nil, + "handle_call should return {:reply, response, state}", + "callback_patterns", file_path) | acc] + else + acc + end + end) + end + + defp check_state_management(content, file_path) do + violations = [] + + # Check for direct state mutation + if Regex.match?(~r/state\s*=\s*.*/, content) and + String.contains?(content, "use GenServer") do + violations = [create_violation(:direct_state_mutation, :info, nil, + "Consider functional state updates instead of direct mutation", + "state_management", file_path) | violations] + end + + # Check for state structure consistency + if String.contains?(content, "use GenServer") and + not Regex.match?(~r/@type state/, content) do + violations = [create_violation(:missing_state_type, :info, nil, + "Consider defining @type state :: ... for state structure", + "state_documentation", file_path) | violations] + end + + violations + end + + defp check_api_separation(content, file_path) do + violations = [] + + # Check if client API and server callbacks are properly separated + has_client_api = Regex.match?(~r/def (start_link|get_|set_|update_)/, content) + has_server_callbacks = Regex.match?(~r/def (init|handle_)/, content) + + if has_client_api and has_server_callbacks do + if not has_clear_api_separation?(content) do + violations = [create_violation(:mixed_api_implementation, :info, nil, + "Consider separating client API from server implementation with comments", + "api_organization", file_path) | violations] + end + end + + violations + end + + defp check_with_statements(content, file_path) do + violations = [] + + # Check for complex functions without with statements + complex_functions = find_complex_functions(content) + + Enum.reduce(complex_functions, violations, fn {func_name, line_num}, acc -> + func_content = extract_function_content(content, line_num) + + if has_multiple_operations?(func_content) and + not Regex.match?(~r/with\s+.*<-/, func_content) do + 
[create_violation(:missing_with_statement, :info, line_num, + "Function '#{func_name}' could benefit from 'with' for error composition", + "railway_oriented_programming", file_path) | acc] + else + acc + end + end) + end + + defp check_tagged_tuples(content, file_path) do + violations = [] + + # Check for untagged returns in public functions + public_functions = extract_public_functions(content) + + Enum.reduce(public_functions, violations, fn {func_name, func_content, line_num}, acc -> + if not has_tagged_returns?(func_content) and + looks_like_operation_function?(func_name) do + [create_violation(:untagged_returns, :info, line_num, + "Function '#{func_name}' should return tagged tuples {:ok, result} or {:error, reason}", + "tagged_tuple_returns", file_path) | acc] + else + acc + end + end) + end + + defp check_error_propagation(content, file_path) do + violations = [] + + # Check for proper error handling in with statements + with_statements = Regex.scan(~r/with\s+.*do(.*?)(?:else(.*?))?end/s, content) + + Enum.reduce(with_statements, violations, fn [_full, _do_block, else_block], acc -> + if else_block == "" or is_nil(else_block) do + [create_violation(:missing_error_handling, :warning, nil, + "'with' statement should have 'else' clause for error handling", + "error_propagation", file_path) | acc] + else + acc + end + end) + end + + defp check_function_specs(content, file_path) do + violations = [] + + public_functions = extract_public_functions(content) + + Enum.reduce(public_functions, violations, fn {func_name, _func_content, line_num}, acc -> + if not has_spec_before_function?(content, func_name, line_num) do + [create_violation(:missing_spec, :info, line_num, + "Public function '#{func_name}' should have @spec", + "api_documentation", file_path) | acc] + else + acc + end + end) + end + + defp check_input_validation(content, file_path) do + violations = [] + + # Check if boundary functions validate input + api_functions = extract_api_functions(content) + + 
Enum.reduce(api_functions, violations, fn {func_name, func_content, line_num}, acc -> + if not has_input_validation?(func_content) and + has_external_params?(func_content) do + [create_violation(:missing_input_validation, :warning, line_num, + "API function '#{func_name}' should validate input parameters", + "input_validation", file_path) | acc] + else + acc + end + end) + end + + defp check_api_consistency(content, file_path) do + violations = [] + + # Check for consistent return patterns across API + api_functions = extract_api_functions(content) + return_patterns = Enum.map(api_functions, fn {_name, content, _line} -> + extract_return_pattern(content) + end) + + unique_patterns = Enum.uniq(return_patterns) + + if length(unique_patterns) > 2 do + violations = [create_violation(:inconsistent_api, :info, nil, + "API functions have inconsistent return patterns", + "api_consistency", file_path) | violations] + end + + violations + end + + defp check_backwards_compatibility(content, file_path) do + violations = [] + + # Check for functions with many required parameters (hard to extend) + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, func_content, line_num}, acc -> + param_count = count_required_parameters(func_content) + + if param_count > 4 do + [create_violation(:too_many_parameters, :info, line_num, + "Function '#{func_name}' has #{param_count} parameters. 
Consider using options map", + "backwards_compatibility", file_path) | acc] + else + acc + end + end) + end + + defp check_business_logic_delegation(content, file_path) do + violations = [] + + # Check if boundary layer delegates to functional core + if String.contains?(content, "use GenServer") do + handle_functions = extract_handle_functions(content) + + Enum.reduce(handle_functions, violations, fn {func_name, func_content, line_num}, acc -> + if has_complex_business_logic?(func_content) and + not delegates_to_core?(func_content) do + [create_violation(:business_logic_in_boundary, :warning, line_num, + "#{func_name} contains business logic. Consider delegating to functional core", + "separation_of_concerns", file_path) | acc] + else + acc + end + end) + end + + violations + end + + defp check_side_effect_isolation(content, file_path) do + violations = [] + + # This is actually expected in boundary layer, so we check for proper patterns + side_effect_operations = [ + ~r/File\./, + ~r/HTTPoison\./, + ~r/Repo\./, + ~r/Logger\./ + ] + + Enum.reduce(side_effect_operations, violations, fn pattern, acc -> + if Regex.match?(pattern, content) do + # Check if side effects are properly handled with error patterns + if not has_proper_side_effect_handling?(content, pattern) do + [create_violation(:improper_side_effect_handling, :info, nil, + "Side effects should be wrapped with proper error handling", + "side_effect_isolation", file_path) | acc] + else + acc + end + else + acc + end + end) + end + + defp check_module_responsibilities(content, file_path) do + violations = [] + + # Check if module has too many responsibilities + responsibility_indicators = [ + {~r/use GenServer/, "process_management"}, + {~r/def.*validate/, "validation"}, + {~r/def.*format/, "formatting"}, + {~r/def.*parse/, "parsing"}, + {~r/File\./, "file_operations"}, + {~r/HTTPoison\./, "http_operations"} + ] + + responsibilities = Enum.filter(responsibility_indicators, fn {pattern, _name} -> + 
Regex.match?(pattern, content) + end) + + if length(responsibilities) > 3 do + responsibility_names = Enum.map(responsibilities, fn {_pattern, name} -> name end) + violations = [create_violation(:too_many_responsibilities, :info, nil, + "Module has too many responsibilities: #{Enum.join(responsibility_names, ", ")}", + "single_responsibility", file_path) | violations] + end + + violations + end + + # Helper functions + + defp create_violation(type, severity, line, message, rule, file_path) do + %{ + type: type, + severity: severity, + line: line, + message: message, + rule: rule, + file: file_path + } + end + + defp has_clear_api_separation?(content) do + # Check for comments separating API from implementation + Regex.match?(~r/# (Client API|Server|Implementation|Callbacks)/, content) + end + + defp find_complex_functions(content) do + functions = extract_function_definitions(content) + + Enum.filter(functions, fn {_name, func_content, _line} -> + calculate_complexity(func_content) > 3 + end) + |> Enum.map(fn {name, _content, line} -> {name, line} end) + end + + defp extract_function_content(_content, _line_num) do + # Simplified - real implementation would parse AST + "" + end + + defp has_multiple_operations?(func_content) do + operation_count = 0 + operation_count = operation_count + Enum.count(Regex.scan(~r/\w+\.\w+\(/, func_content)) + operation_count = operation_count + Enum.count(Regex.scan(~r/GenServer\./, func_content)) + + operation_count > 2 + end + + defp extract_public_functions(content) do + extract_function_definitions(content) + |> Enum.reject(fn {name, _content, _line} -> String.starts_with?(name, "_") end) + end + + defp extract_function_definitions(content) do + # Simplified function extraction + content + |> String.split("\n") + |> Enum.with_index(1) + |> Enum.reduce([], fn {line, line_num}, acc -> + case Regex.run(~r/def\s+(\w+)/, line) do + [_, func_name] -> + [{func_name, "", line_num} | acc] + _ -> + acc + end + end) + |> Enum.reverse() + 
end + + defp has_tagged_returns?(func_content) do + Regex.match?(~r/\{:ok,|\{:error,/, func_content) + end + + defp looks_like_operation_function?(func_name) do + operation_verbs = ["create", "update", "delete", "get", "fetch", "process", "handle", "perform"] + Enum.any?(operation_verbs, &String.starts_with?(func_name, &1)) + end + + defp has_spec_before_function?(content, func_name, line_num) do + lines = String.split(content, "\n") + + if line_num > 1 do + previous_line = Enum.at(lines, line_num - 2, "") + Regex.match?(~r/@spec\s+#{func_name}/, previous_line) + else + false + end + end + + defp extract_api_functions(content) do + # Functions that look like public API (not handle_* or init) + extract_function_definitions(content) + |> Enum.reject(fn {name, _content, _line} -> + String.starts_with?(name, "_") or + String.starts_with?(name, "handle_") or + name == "init" + end) + end + + defp has_input_validation?(func_content) do + validation_patterns = [ + ~r/when\s+is_/, + ~r/validate/, + ~r/\|>\s*check/, + ~r/with\s+.*<-\s*.*validate/ + ] + + Enum.any?(validation_patterns, &Regex.match?(&1, func_content)) + end + + defp has_external_params?(func_content) do + # Simple heuristic - functions with parameters likely have external input + Regex.match?(~r/def\s+\w+\([^)]+\)/, func_content) + end + + defp extract_return_pattern(func_content) do + cond do + Regex.match?(~r/\{:ok,.*\}/, func_content) -> :tagged_tuple + Regex.match?(~r/\w+/, func_content) -> :direct_value + true -> :unknown + end + end + + defp count_required_parameters(func_content) do + case Regex.run(~r/def\s+\w+\(([^)]*)\)/, func_content) do + [_, params_str] -> + params_str + |> String.split(",") + |> Enum.reject(&String.contains?(&1, "\\\\")) # Exclude default params + |> length() + _ -> + 0 + end + end + + defp extract_handle_functions(content) do + extract_function_definitions(content) + |> Enum.filter(fn {name, _content, _line} -> + String.starts_with?(name, "handle_") + end) + end + + defp 
has_complex_business_logic?(func_content) do + business_logic_indicators = [ + ~r/calculate/, + ~r/compute/, + ~r/process.*data/, + ~r/transform/, + ~r/aggregate/ + ] + + Enum.any?(business_logic_indicators, &Regex.match?(&1, func_content)) + end + + defp delegates_to_core?(func_content) do + delegation_patterns = [ + ~r/\w+Core\./, + ~r/\w+Service\./, + ~r/\w+Logic\./ + ] + + Enum.any?(delegation_patterns, &Regex.match?(&1, func_content)) + end + + defp has_proper_side_effect_handling?(content, _pattern) do + # Check if side effects are wrapped in proper error handling + Regex.match?(~r/with\s+.*<-|case\s+.*do/, content) + end + + defp calculate_complexity(func_content) do + # Simplified complexity calculation + complexity = 1 + complexity = complexity + Enum.count(Regex.scan(~r/if\s+/, func_content)) + complexity = complexity + Enum.count(Regex.scan(~r/case\s+/, func_content)) + complexity = complexity + Enum.count(Regex.scan(~r/with\s+/, func_content)) + complexity + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/validation/data_rules.ex b/intent/plugins/claude/subagents/worker-bee/resources/validation/data_rules.ex new file mode 100644 index 0000000..06df0bd --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/validation/data_rules.ex @@ -0,0 +1,79 @@ +defmodule WorkerBee.Validation.DataRules do + @moduledoc """ + Validation rules for data layer compliance. + + Data structures should be immutable, well-designed, and follow + appropriate access patterns. 
+ """ + + def validate_struct_definitions(content, file_path) do + violations = [] + + if Regex.match?(~r/defstruct/, content) do + violations = violations ++ check_default_values(content, file_path) + violations = violations ++ check_field_types(content, file_path) + violations = violations ++ check_struct_documentation(content, file_path) + end + + violations + end + + def validate_data_immutability(content, file_path) do + violations = [] + + violations = violations ++ check_mutation_patterns(content, file_path) + violations = violations ++ check_update_syntax(content, file_path) + + violations + end + + def validate_data_structure_choice(content, file_path) do + violations = [] + + violations = violations ++ check_deep_nesting(content, file_path) + violations = violations ++ check_access_patterns(content, file_path) + + violations + end + + # Simplified implementation for transfer + + defp check_default_values(content, file_path) do + if Regex.match?(~r/defstruct\s+\[/, content) and + not Regex.match?(~r/defstruct.*:.*,/, content) do + [create_violation(:struct_without_defaults, :info, nil, + "Consider providing default values in struct definition", + "struct_best_practices", file_path)] + else + [] + end + end + + defp check_field_types(_content, _file_path), do: [] + defp check_struct_documentation(_content, _file_path), do: [] + defp check_mutation_patterns(_content, _file_path), do: [] + defp check_update_syntax(_content, _file_path), do: [] + + defp check_deep_nesting(content, file_path) do + if Regex.match?(~r/%\{.*%\{.*%\{/, content) do + [create_violation(:deep_nesting, :warning, nil, + "Deeply nested maps are hard to work with. 
Consider flattening or using structs", + "data_structure_design", file_path)] + else + [] + end + end + + defp check_access_patterns(_content, _file_path), do: [] + + defp create_violation(type, severity, line, message, rule, file_path) do + %{ + type: type, + severity: severity, + line: line, + message: message, + rule: rule, + file: file_path + } + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/validation/functional_core_rules.ex b/intent/plugins/claude/subagents/worker-bee/resources/validation/functional_core_rules.ex new file mode 100644 index 0000000..7d6f8d1 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/validation/functional_core_rules.ex @@ -0,0 +1,462 @@ +defmodule WorkerBee.Validation.FunctionalCoreRules do + @moduledoc """ + Validation rules for functional core layer compliance. + + The functional core must be pure, composable, and free from side effects. + These rules enforce the fundamental principles of Worker-Bee Driven Design. + """ + + @doc """ + Validates that functional core modules follow purity principles. + """ + def validate_purity(content, file_path) do + violations = [] + + # Check for GenServer operations + violations = violations ++ check_genserver_calls(content, file_path) + + # Check for process operations + violations = violations ++ check_process_operations(content, file_path) + + # Check for file I/O operations + violations = violations ++ check_file_operations(content, file_path) + + # Check for network operations + violations = violations ++ check_network_operations(content, file_path) + + # Check for logging operations + violations = violations ++ check_logging_operations(content, file_path) + + # Check for database operations + violations = violations ++ check_database_operations(content, file_path) + + violations + end + + @doc """ + Validates function composition patterns. 
+ """ + def validate_composition(content, file_path) do + violations = [] + + # Check for pipeline-friendly function design + violations = violations ++ check_pipeline_design(content, file_path) + + # Check for proper error handling composition + violations = violations ++ check_error_composition(content, file_path) + + # Check for function chaining patterns + violations = violations ++ check_function_chaining(content, file_path) + + violations + end + + @doc """ + Validates single responsibility principle adherence. + """ + def validate_single_responsibility(content, file_path) do + violations = [] + + # Check function length and complexity + violations = violations ++ check_function_complexity(content, file_path) + + # Check for mixed abstraction levels + violations = violations ++ check_abstraction_levels(content, file_path) + + # Check for proper function naming + violations = violations ++ check_function_naming(content, file_path) + + violations + end + + @doc """ + Validates proper use of pattern matching. 
+ """ + def validate_pattern_matching(content, file_path) do + violations = [] + + # Check for pattern matching over conditionals + violations = violations ++ check_pattern_vs_conditionals(content, file_path) + + # Check for guard clause usage + violations = violations ++ check_guard_usage(content, file_path) + + # Check for multiple function heads + violations = violations ++ check_function_heads(content, file_path) + + violations + end + + # Private validation functions + + defp check_genserver_calls(content, file_path) do + genserver_patterns = [ + ~r/GenServer\.(call|cast|start|start_link)/, + ~r/Agent\.(get|update|start|start_link)/, + ~r/Task\.(start|start_link|async)/ + ] + + Enum.flat_map(genserver_patterns, fn pattern -> + case Regex.run(pattern, content, return: :index) do + nil -> [] + _ -> [create_violation(:side_effect, :error, nil, + "Functional core should not contain GenServer/Agent/Task operations", + "functional_core_purity", file_path)] + end + end) + end + + defp check_process_operations(content, file_path) do + process_patterns = [ + ~r/spawn(_link|_monitor)?/, + ~r/Process\.(send|send_after|exit|flag)/, + ~r/receive\s+do/, + ~r/:timer\./ + ] + + Enum.flat_map(process_patterns, fn pattern -> + case Regex.run(pattern, content, return: :index) do + nil -> [] + _ -> [create_violation(:side_effect, :error, nil, + "Functional core should not perform process operations", + "functional_core_purity", file_path)] + end + end) + end + + defp check_file_operations(content, file_path) do + file_patterns = [ + ~r/File\.(read|write|open|close|copy|rename|rm|mkdir)/, + ~r/IO\.(puts|write|read|gets)/, + ~r/Path\.(wildcard|expand)/ + ] + + Enum.flat_map(file_patterns, fn pattern -> + case Regex.run(pattern, content, return: :index) do + nil -> [] + _ -> [create_violation(:side_effect, :error, nil, + "Functional core should not perform file I/O operations", + "functional_core_purity", file_path)] + end + end) + end + + defp check_network_operations(content, 
file_path) do + network_patterns = [ + ~r/HTTPoison\./, + ~r/Tesla\./, + ~r/Req\./, + ~r/:httpc\./, + ~r/:gen_tcp/, + ~r/:ssl/ + ] + + Enum.flat_map(network_patterns, fn pattern -> + case Regex.run(pattern, content, return: :index) do + nil -> [] + _ -> [create_violation(:side_effect, :error, nil, + "Functional core should not perform network operations", + "functional_core_purity", file_path)] + end + end) + end + + defp check_logging_operations(content, file_path) do + if Regex.match?(~r/Logger\.(info|debug|warn|error)/, content) do + [create_violation(:side_effect, :warning, nil, + "Consider moving logging to boundary layer", + "functional_core_purity", file_path)] + else + [] + end + end + + defp check_database_operations(content, file_path) do + db_patterns = [ + ~r/Repo\.(get|insert|update|delete|all)/, + ~r/Ecto\.Query/, + ~r/from\s+\w+\s+in/, + ~r/:mnesia\./ + ] + + Enum.flat_map(db_patterns, fn pattern -> + case Regex.run(pattern, content, return: :index) do + nil -> [] + _ -> [create_violation(:side_effect, :error, nil, + "Functional core should not perform database operations", + "functional_core_purity", file_path)] + end + end) + end + + defp check_pipeline_design(content, file_path) do + violations = [] + + # Check for data-last function design + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, func_content, line_num}, acc -> + if has_poor_pipeline_design?(func_content) do + [create_violation(:poor_composition, :info, line_num, + "Function '#{func_name}' could be more pipeline-friendly", + "pipeline_design", file_path) | acc] + else + acc + end + end) + end + + defp check_error_composition(content, file_path) do + violations = [] + + # Check for proper tagged tuple usage + if Regex.match?(~r/def \w+.*do/, content) and + not Regex.match?(~r/\{:ok,|{:error,/, content) do + violations = [create_violation(:poor_error_handling, :info, nil, + "Consider using tagged tuples {:ok, result} or {:error, 
reason}", + "error_composition", file_path) | violations] + end + + # Check for with statement usage in complex functions + complex_functions = find_complex_functions(content) + + Enum.reduce(complex_functions, violations, fn {func_name, line_num}, acc -> + if not has_with_statement_nearby?(content, line_num) do + [create_violation(:missing_error_composition, :info, line_num, + "Complex function '#{func_name}' might benefit from 'with' statement", + "error_composition", file_path) | acc] + else + acc + end + end) + end + + defp check_function_chaining(content, file_path) do + violations = [] + + # Check for excessive nesting instead of chaining + if Regex.match?(~r/\(\s*\w+\(\s*\w+\(\s*\w+\(/, content) do + violations = [create_violation(:poor_composition, :info, nil, + "Consider using pipe operator instead of nested function calls", + "function_chaining", file_path) | violations] + end + + violations + end + + defp check_function_complexity(content, file_path) do + violations = [] + + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, func_content, line_num}, acc -> + complexity_score = calculate_complexity(func_content) + + cond do + complexity_score > 10 -> + [create_violation(:high_complexity, :warning, line_num, + "Function '#{func_name}' is too complex (score: #{complexity_score})", + "function_complexity", file_path) | acc] + + complexity_score > 7 -> + [create_violation(:moderate_complexity, :info, line_num, + "Function '#{func_name}' could be simplified (score: #{complexity_score})", + "function_complexity", file_path) | acc] + + true -> + acc + end + end) + end + + defp check_abstraction_levels(content, file_path) do + violations = [] + + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, func_content, line_num}, acc -> + if has_mixed_abstraction_levels?(func_content) do + [create_violation(:mixed_abstraction, :warning, line_num, + "Function 
'#{func_name}' mixes different abstraction levels", + "abstraction_consistency", file_path) | acc] + else + acc + end + end) + end + + defp check_function_naming(content, file_path) do + violations = [] + + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, _func_content, line_num}, acc -> + cond do + String.length(func_name) < 3 -> + [create_violation(:poor_naming, :info, line_num, + "Function name '#{func_name}' is too short", + "function_naming", file_path) | acc] + + not Regex.match?(~r/^[a-z_][a-z0-9_]*[?!]?$/, func_name) -> + [create_violation(:poor_naming, :warning, line_num, + "Function name '#{func_name}' doesn't follow Elixir conventions", + "function_naming", file_path) | acc] + + String.contains?(func_name, ["temp", "tmp", "test", "foo", "bar"]) -> + [create_violation(:poor_naming, :info, line_num, + "Function name '#{func_name}' appears to be a placeholder", + "function_naming", file_path) | acc] + + true -> + acc + end + end) + end + + defp check_pattern_vs_conditionals(content, file_path) do + violations = [] + + # Look for if/else that could be pattern matching + if_else_patterns = Regex.scan(~r/if\s+.*\s+do.*else.*end/s, content, return: :index) + + Enum.reduce(if_else_patterns, violations, fn [{start, _length}], acc -> + line_num = count_lines_to_position(content, start) + + [create_violation(:suboptimal_pattern_matching, :info, line_num, + "Consider using pattern matching instead of if/else", + "pattern_matching_preference", file_path) | acc] + end) + end + + defp check_guard_usage(content, file_path) do + violations = [] + + # Check for type checks that could be guards + type_check_patterns = [ + ~r/is_atom\(/, + ~r/is_binary\(/, + ~r/is_integer\(/, + ~r/is_list\(/, + ~r/is_map\(/ + ] + + Enum.reduce(type_check_patterns, violations, fn pattern, acc -> + if Regex.match?(pattern, content) and + not Regex.match?(~r/when\s+is_\w+/, content) do + [create_violation(:missing_guards, :info, nil, + 
"Consider using guard clauses for type checks", + "guard_usage", file_path) | acc] + else + acc + end + end) + end + + defp check_function_heads(content, file_path) do + violations = [] + + # Check for functions that could benefit from multiple heads + functions = extract_function_definitions(content) + + Enum.reduce(functions, violations, fn {func_name, func_content, line_num}, acc -> + if has_complex_case_statements?(func_content) and + not has_multiple_function_heads?(content, func_name) do + [create_violation(:could_use_function_heads, :info, line_num, + "Function '#{func_name}' could use multiple function heads instead of case", + "function_heads", file_path) | acc] + else + acc + end + end) + end + + # Helper functions + + defp create_violation(type, severity, line, message, rule, file_path) do + %{ + type: type, + severity: severity, + line: line, + message: message, + rule: rule, + file: file_path + } + end + + defp extract_function_definitions(content) do + # Simplified function extraction + content + |> String.split("\n") + |> Enum.with_index(1) + |> Enum.reduce([], fn {line, line_num}, acc -> + case Regex.run(~r/def\s+(\w+)/, line) do + [_, func_name] -> + # Extract function content (simplified) + func_content = extract_function_content(content, line_num) + [{func_name, func_content, line_num} | acc] + _ -> + acc + end + end) + |> Enum.reverse() + end + + defp extract_function_content(_content, _start_line) do + # Simplified - in real implementation would parse to find function end + "" + end + + defp has_poor_pipeline_design?(_func_content) do + # Simplified check + false + end + + defp find_complex_functions(content) do + functions = extract_function_definitions(content) + + Enum.filter(functions, fn {_name, func_content, line_num} -> + complexity = calculate_complexity(func_content) + complexity > 5 + end) + |> Enum.map(fn {name, _content, line_num} -> {name, line_num} end) + end + + defp has_with_statement_nearby?(_content, _line_num) do + # 
Simplified check + false + end + + defp calculate_complexity(func_content) do + # Simplified complexity calculation + complexity = 1 + + complexity = complexity + Enum.count(Regex.scan(~r/if\s+/, func_content)) + complexity = complexity + Enum.count(Regex.scan(~r/case\s+/, func_content)) + complexity = complexity + Enum.count(Regex.scan(~r/cond\s+/, func_content)) + complexity = complexity + Enum.count(Regex.scan(~r/with\s+/, func_content)) + + complexity + end + + defp has_mixed_abstraction_levels?(_func_content) do + # Simplified check - real implementation would analyze AST + false + end + + defp count_lines_to_position(content, position) do + content + |> String.slice(0, position) + |> String.split("\n") + |> length() + end + + defp has_complex_case_statements?(func_content) do + case_matches = Regex.scan(~r/case\s+.*\s+do/, func_content) + length(case_matches) > 0 + end + + defp has_multiple_function_heads?(content, func_name) do + function_heads = Regex.scan(~r/def\s+#{func_name}/, content) + length(function_heads) > 1 + end +end \ No newline at end of file diff --git a/intent/plugins/claude/subagents/worker-bee/resources/validation/testing_rules.ex b/intent/plugins/claude/subagents/worker-bee/resources/validation/testing_rules.ex new file mode 100644 index 0000000..1102799 --- /dev/null +++ b/intent/plugins/claude/subagents/worker-bee/resources/validation/testing_rules.ex @@ -0,0 +1,94 @@ +defmodule WorkerBee.Validation.TestingRules do + @moduledoc """ + Validation rules for testing layer compliance. + + Tests should focus on behavior, be well-organized, and provide + comprehensive coverage of the application logic. 
+ """ + + def validate_test_organization(content, file_path) do + violations = [] + + violations = violations ++ check_describe_blocks(content, file_path) + violations = violations ++ check_test_naming(content, file_path) + violations = violations ++ check_setup_patterns(content, file_path) + + violations + end + + def validate_test_behavior_focus(content, file_path) do + violations = [] + + violations = violations ++ check_behavior_vs_implementation(content, file_path) + violations = violations ++ check_assertion_quality(content, file_path) + violations = violations ++ check_test_isolation(content, file_path) + + violations + end + + # Implementation continues with comprehensive test validation rules... + # This is a simplified version for the transfer + + defp check_describe_blocks(content, file_path) do + if String.contains?(file_path, "_test.exs") and + Regex.match?(~r/test\s+"/, content) and + not Regex.match?(~r/describe\s+"/, content) do + [create_violation(:missing_describe_blocks, :info, nil, + "Consider using 'describe' blocks to organize related tests", + "test_organization", file_path)] + else + [] + end + end + + defp check_test_naming(content, file_path) do + test_matches = Regex.scan(~r/test\s+"([^"]+)"/, content) + + Enum.flat_map(test_matches, fn [_full, test_name] -> + if String.length(test_name) < 10 do + [create_violation(:poor_test_naming, :info, nil, + "Test name '#{test_name}' should be more descriptive", + "test_naming", file_path)] + else + [] + end + end) + end + + defp check_behavior_vs_implementation(content, file_path) do + if Regex.match?(~r/assert.*private_function/, content) do + [create_violation(:testing_implementation, :warning, nil, + "Tests should focus on public behavior, not private implementation", + "behavior_testing", file_path)] + else + [] + end + end + + defp check_assertion_quality(content, file_path) do + violations = [] + + if Regex.match?(~r/assert\s+true/, content) or + Regex.match?(~r/assert\s+false/, content) do + 
violations = [create_violation(:generic_assertions, :warning, nil, + "Avoid generic assertions like 'assert true'. Use specific assertions", + "assertion_quality", file_path) | violations] + end + + violations + end + + defp check_setup_patterns(_content, _file_path), do: [] + defp check_test_isolation(_content, _file_path), do: [] + + defp create_violation(type, severity, line, message, rule, file_path) do + %{ + type: type, + severity: severity, + line: line, + message: message, + rule: rule, + file: file_path + } + end +end \ No newline at end of file diff --git a/stp/prj/st/COMPLETED/ST0001/design.md b/intent/st/COMPLETED/ST0001/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0001/design.md rename to intent/st/COMPLETED/ST0001/design.md diff --git a/stp/prj/st/COMPLETED/ST0001/impl.md b/intent/st/COMPLETED/ST0001/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0001/impl.md rename to intent/st/COMPLETED/ST0001/impl.md diff --git a/stp/prj/st/COMPLETED/ST0001/info.md b/intent/st/COMPLETED/ST0001/info.md similarity index 96% rename from stp/prj/st/COMPLETED/ST0001/info.md rename to intent/st/COMPLETED/ST0001/info.md index b7624b4..ffc3d77 100644 --- a/stp/prj/st/COMPLETED/ST0001/info.md +++ b/intent/st/COMPLETED/ST0001/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0001/tasks.md b/intent/st/COMPLETED/ST0001/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0001/tasks.md rename to intent/st/COMPLETED/ST0001/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0002/design.md b/intent/st/COMPLETED/ST0002/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0002/design.md rename to intent/st/COMPLETED/ST0002/design.md diff --git a/stp/prj/st/COMPLETED/ST0002/impl.md b/intent/st/COMPLETED/ST0002/impl.md similarity index 100% rename 
from stp/prj/st/COMPLETED/ST0002/impl.md rename to intent/st/COMPLETED/ST0002/impl.md diff --git a/stp/prj/st/COMPLETED/ST0002/info.md b/intent/st/COMPLETED/ST0002/info.md similarity index 97% rename from stp/prj/st/COMPLETED/ST0002/info.md rename to intent/st/COMPLETED/ST0002/info.md index dc69704..dda1c81 100644 --- a/stp/prj/st/COMPLETED/ST0002/info.md +++ b/intent/st/COMPLETED/ST0002/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0002/tasks.md b/intent/st/COMPLETED/ST0002/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0002/tasks.md rename to intent/st/COMPLETED/ST0002/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0003/design.md b/intent/st/COMPLETED/ST0003/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0003/design.md rename to intent/st/COMPLETED/ST0003/design.md diff --git a/stp/prj/st/COMPLETED/ST0003/impl.md b/intent/st/COMPLETED/ST0003/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0003/impl.md rename to intent/st/COMPLETED/ST0003/impl.md diff --git a/stp/prj/st/COMPLETED/ST0003/info.md b/intent/st/COMPLETED/ST0003/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0003/info.md rename to intent/st/COMPLETED/ST0003/info.md index 2db43b3..f3d5ee0 100644 --- a/stp/prj/st/COMPLETED/ST0003/info.md +++ b/intent/st/COMPLETED/ST0003/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0003/tasks.md b/intent/st/COMPLETED/ST0003/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0003/tasks.md rename to intent/st/COMPLETED/ST0003/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0004/design.md b/intent/st/COMPLETED/ST0004/design.md 
similarity index 100% rename from stp/prj/st/COMPLETED/ST0004/design.md rename to intent/st/COMPLETED/ST0004/design.md diff --git a/stp/prj/st/COMPLETED/ST0004/impl.md b/intent/st/COMPLETED/ST0004/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0004/impl.md rename to intent/st/COMPLETED/ST0004/impl.md diff --git a/stp/prj/st/COMPLETED/ST0004/info.md b/intent/st/COMPLETED/ST0004/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0004/info.md rename to intent/st/COMPLETED/ST0004/info.md index da54e09..fbb2a9e 100644 --- a/stp/prj/st/COMPLETED/ST0004/info.md +++ b/intent/st/COMPLETED/ST0004/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0004/tasks.md b/intent/st/COMPLETED/ST0004/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0004/tasks.md rename to intent/st/COMPLETED/ST0004/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0005/design.md b/intent/st/COMPLETED/ST0005/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0005/design.md rename to intent/st/COMPLETED/ST0005/design.md diff --git a/stp/prj/st/COMPLETED/ST0005/impl.md b/intent/st/COMPLETED/ST0005/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0005/impl.md rename to intent/st/COMPLETED/ST0005/impl.md diff --git a/stp/prj/st/COMPLETED/ST0005/info.md b/intent/st/COMPLETED/ST0005/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0005/info.md rename to intent/st/COMPLETED/ST0005/info.md index f61ffc4..8eef89f 100644 --- a/stp/prj/st/COMPLETED/ST0005/info.md +++ b/intent/st/COMPLETED/ST0005/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0005/tasks.md 
b/intent/st/COMPLETED/ST0005/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0005/tasks.md rename to intent/st/COMPLETED/ST0005/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0006/design.md b/intent/st/COMPLETED/ST0006/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0006/design.md rename to intent/st/COMPLETED/ST0006/design.md diff --git a/stp/prj/st/COMPLETED/ST0006/impl.md b/intent/st/COMPLETED/ST0006/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0006/impl.md rename to intent/st/COMPLETED/ST0006/impl.md diff --git a/stp/prj/st/COMPLETED/ST0006/info.md b/intent/st/COMPLETED/ST0006/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0006/info.md rename to intent/st/COMPLETED/ST0006/info.md index 96c886a..fa7d7ff 100644 --- a/stp/prj/st/COMPLETED/ST0006/info.md +++ b/intent/st/COMPLETED/ST0006/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0006/tasks.md b/intent/st/COMPLETED/ST0006/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0006/tasks.md rename to intent/st/COMPLETED/ST0006/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0007/design.md b/intent/st/COMPLETED/ST0007/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0007/design.md rename to intent/st/COMPLETED/ST0007/design.md diff --git a/stp/prj/st/COMPLETED/ST0007/impl.md b/intent/st/COMPLETED/ST0007/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0007/impl.md rename to intent/st/COMPLETED/ST0007/impl.md diff --git a/stp/prj/st/COMPLETED/ST0007/info.md b/intent/st/COMPLETED/ST0007/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0007/info.md rename to intent/st/COMPLETED/ST0007/info.md index 0540658..37c7a42 100644 --- a/stp/prj/st/COMPLETED/ST0007/info.md +++ b/intent/st/COMPLETED/ST0007/info.md @@ -1,6 
+1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0007/tasks.md b/intent/st/COMPLETED/ST0007/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0007/tasks.md rename to intent/st/COMPLETED/ST0007/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0008/design.md b/intent/st/COMPLETED/ST0008/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0008/design.md rename to intent/st/COMPLETED/ST0008/design.md diff --git a/stp/prj/st/COMPLETED/ST0008/impl.md b/intent/st/COMPLETED/ST0008/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0008/impl.md rename to intent/st/COMPLETED/ST0008/impl.md diff --git a/stp/prj/st/COMPLETED/ST0008/info.md b/intent/st/COMPLETED/ST0008/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0008/info.md rename to intent/st/COMPLETED/ST0008/info.md index 7b8e474..60daea4 100644 --- a/stp/prj/st/COMPLETED/ST0008/info.md +++ b/intent/st/COMPLETED/ST0008/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0008/tasks.md b/intent/st/COMPLETED/ST0008/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0008/tasks.md rename to intent/st/COMPLETED/ST0008/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0009/design.md b/intent/st/COMPLETED/ST0009/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0009/design.md rename to intent/st/COMPLETED/ST0009/design.md diff --git a/stp/prj/st/COMPLETED/ST0009/impl.md b/intent/st/COMPLETED/ST0009/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0009/impl.md rename to intent/st/COMPLETED/ST0009/impl.md diff --git a/stp/prj/st/COMPLETED/ST0009/info.md b/intent/st/COMPLETED/ST0009/info.md similarity 
index 98% rename from stp/prj/st/COMPLETED/ST0009/info.md rename to intent/st/COMPLETED/ST0009/info.md index cae17fd..992a16b 100644 --- a/stp/prj/st/COMPLETED/ST0009/info.md +++ b/intent/st/COMPLETED/ST0009/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250306 completed: 20250603 diff --git a/stp/prj/st/COMPLETED/ST0009/tasks.md b/intent/st/COMPLETED/ST0009/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0009/tasks.md rename to intent/st/COMPLETED/ST0009/tasks.md diff --git a/stp/prj/st/NOT-STARTED/ST0011/design.md b/intent/st/COMPLETED/ST0011/design.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0011/design.md rename to intent/st/COMPLETED/ST0011/design.md diff --git a/stp/prj/st/NOT-STARTED/ST0011/impl.md b/intent/st/COMPLETED/ST0011/impl.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0011/impl.md rename to intent/st/COMPLETED/ST0011/impl.md diff --git a/stp/prj/st/NOT-STARTED/ST0011/info.md b/intent/st/COMPLETED/ST0011/info.md similarity index 97% rename from stp/prj/st/NOT-STARTED/ST0011/info.md rename to intent/st/COMPLETED/ST0011/info.md index fd9933e..e865c92 100644 --- a/stp/prj/st/NOT-STARTED/ST0011/info.md +++ b/intent/st/COMPLETED/ST0011/info.md @@ -1,7 +1,7 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 -status: Not Started +intent_version: 2.0.0 +status: Completed created: 20250603 completed: --- diff --git a/stp/prj/st/NOT-STARTED/ST0011/tasks.md b/intent/st/COMPLETED/ST0011/tasks.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0011/tasks.md rename to intent/st/COMPLETED/ST0011/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0012/design.md b/intent/st/COMPLETED/ST0012/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0012/design.md rename to intent/st/COMPLETED/ST0012/design.md diff --git 
a/stp/prj/st/COMPLETED/ST0012/impl.md b/intent/st/COMPLETED/ST0012/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0012/impl.md rename to intent/st/COMPLETED/ST0012/impl.md diff --git a/stp/prj/st/COMPLETED/ST0012/info.md b/intent/st/COMPLETED/ST0012/info.md similarity index 98% rename from stp/prj/st/COMPLETED/ST0012/info.md rename to intent/st/COMPLETED/ST0012/info.md index 417d7d0..96796ab 100644 --- a/stp/prj/st/COMPLETED/ST0012/info.md +++ b/intent/st/COMPLETED/ST0012/info.md @@ -1,6 +1,6 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250307 completed: 20250307 diff --git a/stp/prj/st/COMPLETED/ST0012/tasks.md b/intent/st/COMPLETED/ST0012/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0012/tasks.md rename to intent/st/COMPLETED/ST0012/tasks.md diff --git a/stp/prj/st/COMPLETED/ST0013/impl.md b/intent/st/COMPLETED/ST0013/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0013/impl.md rename to intent/st/COMPLETED/ST0013/impl.md diff --git a/stp/prj/st/COMPLETED/ST0013/info.md b/intent/st/COMPLETED/ST0013/info.md similarity index 96% rename from stp/prj/st/COMPLETED/ST0013/info.md rename to intent/st/COMPLETED/ST0013/info.md index c93e512..2d32acd 100644 --- a/stp/prj/st/COMPLETED/ST0013/info.md +++ b/intent/st/COMPLETED/ST0013/info.md @@ -1,15 +1,15 @@ --- verblock: "20 Mar 2025:v0.1: Matthew Sinclair - Updated via STP upgrade" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250311 -completed: 20250708 +completed: 20250727 --- # ST0013: STP Blog Post Series -- **Status**: In Progress +- **Status**: Completed - **Created**: 2025-03-11 -- **Completed**: +- **Completed**: 2025-07-27 - **Author**: Matthew Sinclair ## Summary diff --git a/stp/prj/st/COMPLETED/ST0014/design.md b/intent/st/COMPLETED/ST0014/design.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0014/design.md rename to 
intent/st/COMPLETED/ST0014/design.md diff --git a/stp/prj/st/COMPLETED/ST0014/impl.md b/intent/st/COMPLETED/ST0014/impl.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0014/impl.md rename to intent/st/COMPLETED/ST0014/impl.md diff --git a/stp/prj/st/COMPLETED/ST0014/info.md b/intent/st/COMPLETED/ST0014/info.md similarity index 97% rename from stp/prj/st/COMPLETED/ST0014/info.md rename to intent/st/COMPLETED/ST0014/info.md index bcb017e..5505211 100644 --- a/stp/prj/st/COMPLETED/ST0014/info.md +++ b/intent/st/COMPLETED/ST0014/info.md @@ -1,6 +1,6 @@ --- verblock: "20 Mar 2025:v0.1: Claude - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Completed created: 20250320 completed: 20250709 diff --git a/stp/prj/st/COMPLETED/ST0014/tasks.md b/intent/st/COMPLETED/ST0014/tasks.md similarity index 100% rename from stp/prj/st/COMPLETED/ST0014/tasks.md rename to intent/st/COMPLETED/ST0014/tasks.md diff --git a/intent/st/COMPLETED/ST0016/design.md b/intent/st/COMPLETED/ST0016/design.md new file mode 100644 index 0000000..3f4f6cd --- /dev/null +++ b/intent/st/COMPLETED/ST0016/design.md @@ -0,0 +1,372 @@ +--- +verblock: "16 Jul 2025:v0.2: Matthew Sinclair - Updated with JSON config and new commands" +stp_version: 2.0.0 +--- +# ST0016: Design Document + +## Overview + +This design document details the comprehensive refactoring of the CLI tool from "stp" to "intent", addressing architectural concerns and modernizing the tool structure. 
+ +**Key Terminology**: +- **intent**: The command-line tool (lowercase) +- **STP**: Steel Thread Process methodology (unchanged) +- **intent_***: Subcommands following the new naming convention +- **Fail-forward approach**: Direct migration to v2.0.0 without incremental steps + +## Phase 0: Test Infrastructure (Foundation) + +### 0.1 Create Example Projects FIRST + +Before any implementation, create comprehensive test fixtures: + +``` +examples/ +├── v0.0.0-project/ # Ancient .stp-config format +├── v1.2.0-project/ # File-based steel threads +├── v1.2.1-project/ # Directory-based steel threads +└── hello-world/ # Clean v2.0.0 structure +``` + +### 0.2 Test Suite Development + +- Comprehensive BATS test suite +- Migration scenario tests +- Self-hosting test cases +- Performance benchmarks +- Error condition tests + +### 0.3 Documentation Templates + +- Migration guide template +- Troubleshooting guide structure +- Release notes format + +## Phase 1: Repository Restructuring + +### 1.1 Current Structure Problems + +``` +stp/ +├── bin/ # WRONG: Tool executables mixed with project artifacts +├── prj/st/ # UNNECESSARILY NESTED: Steel threads +├── eng/ # Project artifacts +└── _templ/ # Tool resources +``` + +Problems identified: + +- Tool executables (stp/bin/) mixed with project artifacts +- Unnecessary nesting (prj/st/ instead of just st/) +- Unclear separation of concerns +- Confusing for users and deployment + +### 1.2 New Clean Structure + +``` +$INTENT_HOME/ # The intent tool repository +├── .intent/ # LOCAL config for intent-on-itself +│ └── config.yml +├── bin/ # Tool executables (moved from stp/bin/) +│ ├── intent +│ ├── intent_* +│ └── stp -> intent # Backwards compatibility +├── lib/ # Tool resources (was stp/_templ/) +│ └── templates/ +├── intent/ # Project artifacts (was stp/) +│ ├── st/ # Steel threads (flattened from prj/st/) +│ ├── eng/ +│ ├── ref/ # Reference docs (renamed from usr/) +│ ├── llm/ +│ └── _archive/ +├── backlog/ # Existing Backlog.md 
directory +├── examples/ # NEW: Example projects for testing +│ └── hello-world/ # Model project with full structure +├── docs/ # Tool documentation +└── tests/ # Tool tests +``` + +Benefits: + +- Clear separation: tool (bin/, lib/) vs usage (intent/, backlog/) +- Flattened structure (st/ not prj/st/) +- Intuitive organization +- Easy deployment (just copy bin/ and lib/) + +## Phase 2: Configuration System + +### 2.1 Config Locations + +- **Local**: `.intent/config.json` (project-specific) +- **Global**: `~/.config/intent/config.json` (XDG standard) + +### 2.2 Config Format + +```json +{ + "intent_version": "2.0.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "Matthew Sinclair", + "editor": "vim" +} +``` + +### 2.3 Config Loading + +**Loading Order**: +1. Load global config first (`~/.config/intent/config.json`) +2. Overlay local config (`.intent/config.json`) +3. Apply environment variable overrides (highest priority) + +**Project Detection**: +```bash +find_project_root() { + current_dir=$(pwd) + while [ "$current_dir" != "/" ]; do + # New structure + if [ -f "$current_dir/.intent/config.json" ]; then + echo "$current_dir" + return 0 + fi + # Legacy structures + if [ -d "$current_dir/stp/.config" ] || [ -f "$current_dir/.stp-config" ]; then + echo "$current_dir" + return 0 + fi + current_dir=$(dirname "$current_dir") + done + return 1 +} +``` + +### 2.4 New Commands + +#### intent bootstrap +- Initial setup for new installations +- Creates global config directory +- Sets up PATH recommendations +- Detects or uses $INTENT_HOME +- Validates installation + +#### intent doctor +- Configuration diagnostics +- Validates JSON syntax +- Checks for missing dependencies +- Suggests fixes for common issues +- Can auto-fix with --fix flag + +## Phase 3: Model Project & Testing + +### 3.1 Create Example Project + +``` +examples/hello-world/ +├── .intent/ +│ └── config.json # Example config (JSON) +├── intent/ +│ ├── st/ +│ │ ├── ST0001/ # Example steel 
thread +│ │ └── ST0002/ +│ ├── eng/ +│ │ └── tpd/ +│ └── ref/ +└── backlog/ + └── config.yml # Backlog.md config (unchanged) +``` + +This serves as: + +- Testing ground for upgrade scenarios +- Example for new users +- Reference implementation +- Regression test baseline + +### 3.2 Upgrade Testing Strategy + +Test scenarios: + +1. **v0.0.0 → v2.0.0**: Ancient .stp-config format +2. **v1.2.0 → v2.0.0**: File-based steel threads +3. **v1.2.1 → v2.0.0**: Directory-based steel threads + +Validation tests: + +- All files migrated correctly +- No data loss +- Commands work post-migration +- Rollback capability +- Config format conversion + +### 3.3 Test Implementation + +```bash +# tests/upgrade/comprehensive_test.bats +@test "upgrade from v1.2.1 to v2.0.0" { + # Setup test project + cp -r examples/v1.2.1-project "$TEST_DIR/project" + cd "$TEST_DIR/project" + + # Take snapshot + find . -type f | sort > before.txt + + # Run upgrade + run intent upgrade --yes + + # Verify structure + assert_success + assert [ -f ".intent/config.json" ] + assert [ -d "intent/st" ] + assert [ ! -d "stp/prj/st" ] + + # Verify no data loss + # ... detailed checks +} +``` + +## Phase 4: Migration Implementation + +### 4.1 Upgrade Command Enhancement + +``` +intent upgrade [--dry-run] [--yes] +``` + +Migration steps: + +1. **Detect Version**: + - Check stp_version in known locations + - If unable to determine: fail with clear error message + - No assumptions about unknown versions + +2. **Backup**: Create .backup/ with timestamp + +3. **Migrate Structure**: + ``` + Old New + stp/bin/* → bin/* + stp/_templ/* → lib/templates/* + stp/prj/st/* → intent/st/* + stp/eng/* → intent/eng/* + stp/usr/* → intent/ref/* + stp/.config/* → .intent/* (with format conversion) + .stp-config → .intent/config.json + ``` + +4. **Update Configs**: Convert to JSON format +5. **Verify**: Run validation checks +6. **Update Documentation**: README, CHANGELOG, etc. +7. 
**Cleanup**: Remove old structure + +### 4.2 Failure Handling + +**Fail-forward approach**: +- No rollback mechanism (not needed) +- Clear error messages on failure +- Backup available for manual recovery if needed +- Focus on getting it right the first time through comprehensive testing + +## Phase 5: Command Updates + +### 5.1 Main Script + +Updates required: + +- Detect invocation name (stp vs intent) +- Load config with new hierarchy +- Use configured directory names +- Support both old and new structures + +### 5.2 All Subcommands + +Changes for each command: + +- Use `$INTENT_DIR` instead of hardcoded "stp" +- Remove hardcoded path assumptions +- Use flattened structure (st/ not prj/st/) +- Maintain backwards compatibility + +## Phase 6: Documentation + +### 6.1 Updates Required + +- All command examples use "intent" +- Directory structure documentation +- Migration guide with examples +- Troubleshooting section +- Configuration reference + +### 6.2 Backwards Compatibility Notes + +Important clarifications: + +- ST#### numbering remains (Steel Thread Process continues) +- "stp" → "intent" is just the CLI tool name +- Existing projects continue working +- Migration is optional but recommended + +## Phase 7: Bootstrap Strategy + +### 7.1 New User Flow + +```bash +# Clone the repository +git clone https://github.com/user/intent.git +cd intent + +# Option 1: Set INTENT_HOME explicitly +export INTENT_HOME=$(pwd) +./bin/intent bootstrap + +# Option 2: Let bootstrap detect location +./bin/intent bootstrap +# Bootstrap will crawl up from current location to find intent directory +``` + +### 7.2 Bootstrap Command Tasks + +1. Detect or validate $INTENT_HOME +2. Create global config directory: `~/.config/intent/` +3. Generate initial global config +4. Add bin/ to PATH recommendations +5. Validate installation +6. 
Run `intent doctor` to verify + +## Additional Considerations + +### Error Handling Strategy +- Clear, actionable error messages +- Specific version detection failures +- Migration interruption detection +- Config validation errors +- Dependency check failures + +### Performance Optimization +- Config caching within session +- Efficient directory traversal +- Minimal overhead on command execution + +### Integration Updates +- CI/CD pipeline modifications +- GitHub Actions workflow updates +- Documentation site updates +- Release automation + +## Risk Mitigation + +1. **Comprehensive Testing**: Test every upgrade path with model projects +2. **Test-First Development**: Create tests before implementation +3. **Backup Everything**: Full project backup before migration +4. **Clear Error Messages**: Fail fast with helpful diagnostics +5. **Documentation First**: Complete docs before release +6. **intent doctor**: Safety net for configuration issues + +## Success Criteria + +1. **Zero data loss** during upgrades +2. **All existing projects** continue working +3. **Clean separation** of tool vs project artifacts +4. **Intuitive structure** for new users +5. **Robust test coverage** for all scenarios +6. **Smooth migration** experience +7. **Performance maintained** or improved diff --git a/intent/st/COMPLETED/ST0016/impl.md b/intent/st/COMPLETED/ST0016/impl.md new file mode 100644 index 0000000..093399a --- /dev/null +++ b/intent/st/COMPLETED/ST0016/impl.md @@ -0,0 +1,561 @@ +--- +verblock: "16 Jul 2025:v0.2: Matthew Sinclair - Updated with JSON config and new commands" +stp_version: 2.0.0 +--- +# ST0016: Implementation Details + +## Implementation Order + +1. **Create ST0016** with this plan (DONE) +2. **Phase 0: Test Infrastructure** (CRITICAL - DO FIRST): + - Create all example projects (v0.0.0, v1.2.0, v1.2.1, hello-world) + - Write comprehensive BATS test suite + - Document expected behaviors + - Test migration scenarios +3. 
**Implement new commands**: + - `intent bootstrap` for new installations + - `intent doctor` for diagnostics +4. **Implement configuration system**: + - JSON config parsing + - Config loading hierarchy + - Environment variable handling +5. **Implement restructuring**: + - Move bin/ to top level + - Create lib/ structure + - Flatten intent/ structure +6. **Implement upgrade command**: + - Version detection (with error handling) + - Backup mechanism + - Migration logic + - NO rollback (fail-forward) +7. **Update all existing commands**: + - Config loading + - Path resolution + - Backwards compatibility +8. **Documentation updates**: + - README.md + - CHANGELOG.md + - Migration guide + - Troubleshooting guide +9. **Release v2.0.0** + +## Technical Implementation + +### Config Loading Implementation + +```bash +#!/bin/bash +# Config loading for intent + +load_intent_config() { + # Initialize defaults + INTENT_VERSION="2.0.0" + INTENT_DIR="intent" + BACKLOG_DIR="backlog" + + # Find project root + PROJECT_ROOT=$(find_project_root) + + # Load global config (XDG standard location) + if [ -f "$HOME/.config/intent/config.json" ]; then + eval "$(parse_json "$HOME/.config/intent/config.json" "global_")" + [ -n "$global_intent_dir" ] && INTENT_DIR="$global_intent_dir" + [ -n "$global_backlog_dir" ] && BACKLOG_DIR="$global_backlog_dir" + [ -n "$global_author" ] && AUTHOR="$global_author" + [ -n "$global_editor" ] && EDITOR="$global_editor" + fi + + # Load local config (overrides global) + if [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + eval "$(parse_json "$PROJECT_ROOT/.intent/config.json" "local_")" + [ -n "$local_intent_dir" ] && INTENT_DIR="$local_intent_dir" + [ -n "$local_backlog_dir" ] && BACKLOG_DIR="$local_backlog_dir" + [ -n "$local_author" ] && AUTHOR="$local_author" + [ -n "$local_editor" ] && EDITOR="$local_editor" + fi + + # Environment variables override all + [ -n "$INTENT_DIR_OVERRIDE" ] && INTENT_DIR="$INTENT_DIR_OVERRIDE" + [ -n "$BACKLOG_DIR_OVERRIDE" ] 
&& BACKLOG_DIR="$BACKLOG_DIR_OVERRIDE" + + # Legacy support: check for stp directory if intent doesn't exist + if [ ! -d "$PROJECT_ROOT/$INTENT_DIR" ] && [ -d "$PROJECT_ROOT/stp" ]; then + INTENT_DIR="stp" + fi + + # Export for use in subcommands + export INTENT_VERSION INTENT_DIR BACKLOG_DIR AUTHOR EDITOR PROJECT_ROOT +} + +parse_json() { + local file=$1 + local prefix=$2 + # Simple JSON parser for flat config structure + # Extracts key-value pairs from JSON + grep -E '^\s*"[^"]+"\s*:\s*"[^"]*"' "$file" | \ + sed -E 's/^\s*"([^"]+)"\s*:\s*"([^"]*)".*/\1="\2"/' | \ + sed -e "s/^/${prefix}/" +} + +find_project_root() { + local current_dir=$(pwd) + while [ "$current_dir" != "/" ]; do + # New structure + if [ -f "$current_dir/.intent/config.json" ]; then + echo "$current_dir" + return 0 + fi + # Legacy structures + if [ -d "$current_dir/stp/.config" ] || [ -f "$current_dir/.stp-config" ]; then + echo "$current_dir" + return 0 + fi + current_dir=$(dirname "$current_dir") + done + # No project root found + return 1 +} +``` + +### Bootstrap Command Implementation + +```bash +#!/bin/bash +# intent_bootstrap - Initial setup for new installations + +bootstrap_intent() { + echo "Intent Bootstrap v2.0.0" + echo "=======================" + + # 1. Detect or validate INTENT_HOME + if [ -z "$INTENT_HOME" ]; then + echo "INTENT_HOME not set, detecting installation directory..." + # Crawl up from current location to find intent directory + local current_dir=$(pwd) + while [ "$current_dir" != "/" ]; do + if [ -f "$current_dir/bin/intent" ] && [ -d "$current_dir/lib" ]; then + INTENT_HOME="$current_dir" + echo "Found intent installation at: $INTENT_HOME" + break + fi + current_dir=$(dirname "$current_dir") + done + + if [ -z "$INTENT_HOME" ]; then + echo "ERROR: Could not detect intent installation directory" + echo "Please set INTENT_HOME and run bootstrap again" + exit 1 + fi + fi + + # 2. Validate installation + if [ ! 
-f "$INTENT_HOME/bin/intent" ]; then + echo "ERROR: Invalid INTENT_HOME - intent executable not found" + exit 1 + fi + + # 3. Create global config directory + echo "Creating global config directory..." + mkdir -p "$HOME/.config/intent" + + # 4. Generate initial global config if it doesn't exist + if [ ! -f "$HOME/.config/intent/config.json" ]; then + echo "Creating default global configuration..." + cat > "$HOME/.config/intent/config.json" << EOF +{ + "intent_version": "2.0.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "${USER}", + "editor": "${EDITOR:-vim}" +} +EOF + fi + + # 5. PATH setup recommendations + echo "" + echo "Setup complete! Add the following to your shell configuration:" + echo "" + echo " export INTENT_HOME=\"$INTENT_HOME\"" + echo " export PATH=\"\$INTENT_HOME/bin:\$PATH\"" + echo "" + + # 6. Run doctor to verify + echo "Running intent doctor to verify installation..." + "$INTENT_HOME/bin/intent" doctor +} +``` + +### Doctor Command Implementation + +```bash +#!/bin/bash +# intent_doctor - Configuration diagnostics and fixes + +doctor_check() { + local fix_mode=false + [ "$1" = "--fix" ] && fix_mode=true + + echo "Intent Doctor v2.0.0" + echo "====================" + echo "" + + local errors=0 + local warnings=0 + + # Check 1: INTENT_HOME + echo -n "Checking INTENT_HOME... " + if [ -z "$INTENT_HOME" ]; then + echo "ERROR: Not set" + ((errors++)) + if [ "$fix_mode" = true ]; then + echo " FIX: Please run 'intent bootstrap' to set up INTENT_HOME" + fi + elif [ ! -d "$INTENT_HOME" ]; then + echo "ERROR: Directory does not exist" + ((errors++)) + else + echo "OK ($INTENT_HOME)" + fi + + # Check 2: Executables + echo -n "Checking intent executable... " + if [ -f "$INTENT_HOME/bin/intent" ] && [ -x "$INTENT_HOME/bin/intent" ]; then + echo "OK" + else + echo "ERROR: Not found or not executable" + ((errors++)) + fi + + # Check 3: Global config + echo -n "Checking global config... 
" + if [ -f "$HOME/.config/intent/config.json" ]; then + # Validate JSON syntax + if grep -qE '^\s*\{.*\}\s*$' "$HOME/.config/intent/config.json" 2>/dev/null; then + echo "OK" + else + echo "ERROR: Invalid JSON syntax" + ((errors++)) + if [ "$fix_mode" = true ]; then + echo " FIX: Backing up and creating new config..." + mv "$HOME/.config/intent/config.json" "$HOME/.config/intent/config.json.bak" + bootstrap_intent >/dev/null 2>&1 + fi + fi + else + echo "WARNING: Not found" + ((warnings++)) + if [ "$fix_mode" = true ]; then + echo " FIX: Creating default global config..." + mkdir -p "$HOME/.config/intent" + bootstrap_intent >/dev/null 2>&1 + fi + fi + + # Check 4: Local config (if in project) + if [ -n "$PROJECT_ROOT" ] && [ -f "$PROJECT_ROOT/.intent/config.json" ]; then + echo -n "Checking local config... " + if grep -qE '^\s*\{.*\}\s*$' "$PROJECT_ROOT/.intent/config.json" 2>/dev/null; then + echo "OK" + else + echo "ERROR: Invalid JSON syntax" + ((errors++)) + fi + fi + + # Check 5: PATH + echo -n "Checking PATH... " + if echo "$PATH" | grep -q "$INTENT_HOME/bin"; then + echo "OK" + else + echo "WARNING: $INTENT_HOME/bin not in PATH" + ((warnings++)) + fi + + # Summary + echo "" + echo "Summary:" + echo " Errors: $errors" + echo " Warnings: $warnings" + + if [ $errors -eq 0 ] && [ $warnings -eq 0 ]; then + echo "" + echo "✓ All checks passed!" 
+ return 0 + elif [ "$fix_mode" = false ] && [ $errors -gt 0 ]; then + echo "" + echo "Run 'intent doctor --fix' to attempt automatic fixes" + return 1 + fi + + return $errors +} +``` + +### Upgrade Command Implementation + +```bash +#!/bin/bash +# intent_upgrade implementation + +upgrade_to_v2() { + local dry_run=false + local auto_yes=false + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) dry_run=true ;; + --yes) auto_yes=true ;; + *) echo "Unknown option: $1"; exit 1 ;; + esac + shift + done + + # Detect current version + local current_version=$(detect_stp_version) + + if [ -z "$current_version" ]; then + echo "ERROR: Unable to determine current STP version" + echo "" + echo "This could mean:" + echo " 1. This is not an STP/intent project" + echo " 2. The project structure is corrupted" + echo " 3. This is a very old version we don't recognize" + echo "" + echo "Please verify this is an STP project before proceeding." + exit 1 + fi + + echo "Current version: $current_version" + + if [ "$current_version" = "2.0.0" ]; then + echo "Already at version 2.0.0" + return 0 + fi + + # Create backup + local backup_dir=".backup_$(date +%Y%m%d_%H%M%S)" + if [ "$dry_run" = false ]; then + echo "Creating backup in $backup_dir..." + mkdir -p "$backup_dir" + + # Backup all relevant directories + [ -d "stp" ] && cp -r stp "$backup_dir/" + [ -f ".stp-config" ] && cp .stp-config "$backup_dir/" + [ -d ".intent" ] && cp -r .intent "$backup_dir/" + fi + + # Migration plan + echo -e "\nMigration plan:" + echo "1. Move stp/bin/* → bin/" + echo "2. Move stp/_templ/* → lib/templates/" + echo "3. Move stp/prj/st/* → intent/st/" + echo "4. Move stp/eng/* → intent/eng/" + echo "5. Move stp/usr/* → intent/ref/" + echo "6. Convert configs to JSON format" + echo "7. Create .intent/config.json" + + if [ "$auto_yes" = false ] && [ "$dry_run" = false ]; then + read -p "Proceed with migration? (y/n) " -n 1 -r + echo + if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then + echo "Migration cancelled" + return 1 + fi + fi + + if [ "$dry_run" = true ]; then + echo -e "\n[DRY RUN] No changes made" + return 0 + fi + + # Perform migration + echo -e "\nPerforming migration..." + + # Move bin files + if [ -d "stp/bin" ]; then + echo "Moving executables to bin/..." + mkdir -p bin + mv stp/bin/* bin/ + # Rename stp to intent + [ -f "bin/stp" ] && mv bin/stp bin/intent + # Create compatibility symlink + ln -s intent bin/stp + # Rename all stp_* to intent_* + for file in bin/stp_*; do + [ -f "$file" ] && mv "$file" "${file/stp_/intent_}" + done + fi + + # Move templates + if [ -d "stp/_templ" ]; then + echo "Moving templates to lib/..." + mkdir -p lib + mv stp/_templ lib/templates + fi + + # Create intent directory and move content + mkdir -p intent + + # Flatten steel threads + if [ -d "stp/prj/st" ]; then + echo "Flattening steel thread structure..." + mkdir -p intent/st + # Move all subdirectories + for dir in stp/prj/st/*/; do + [ -d "$dir" ] && mv "$dir" intent/st/ + done + # Move any files + find stp/prj/st -maxdepth 1 -type f -exec mv {} intent/st/ \; + fi + + # Move other directories + [ -d "stp/eng" ] && mv stp/eng intent/ + [ -d "stp/usr" ] && mv stp/usr intent/ref + [ -d "stp/llm" ] && mv stp/llm intent/ + [ -d "stp/_archive" ] && mv stp/_archive intent/ + + # Create config + echo "Creating .intent/config.json..." + mkdir -p .intent + cat > .intent/config.json << EOF +{ + "intent_version": "2.0.0", + "intent_dir": "intent", + "backlog_dir": "backlog", + "author": "${AUTHOR:-$USER}", + "editor": "${EDITOR:-vim}" +} +EOF + + # Cleanup old structure + if [ -d "stp" ]; then + # Check if directory is empty + if [ -z "$(ls -A stp)" ]; then + rmdir stp + else + echo "Warning: stp/ directory not empty, manual cleanup required" + fi + fi + + echo -e "\nMigration complete!" + echo "Backup saved in: $backup_dir" + echo "" + echo "Next steps:" + echo "1. Update your PATH to include the new bin/ directory" + echo "2. 
Run 'intent doctor' to verify the migration" + echo "3. Review the changes and test your commands" +} + +detect_stp_version() { + # Check multiple locations for version information + + # 1. Check .intent/config.json (v2.0.0+) + if [ -f ".intent/config.json" ]; then + local version=$(grep -E '"intent_version"' ".intent/config.json" | sed -E 's/.*"intent_version"[[:space:]]*:[[:space:]]*"([^"]+)".*/\1/') + [ -n "$version" ] && echo "$version" && return 0 + fi + + # 2. Check stp/.config/version (v1.2.0+) + if [ -f "stp/.config/version" ]; then + local version=$(grep -E '^stp_version:' "stp/.config/version" | sed 's/stp_version:[[:space:]]*//') + [ -n "$version" ] && echo "$version" && return 0 + fi + + # 3. Check for .stp-config (v0.0.0) + if [ -f ".stp-config" ]; then + echo "0.0.0" + return 0 + fi + + # 4. Check for stp directory structure (assume v1.0.0) + if [ -d "stp/prj/st" ]; then + echo "1.0.0" + return 0 + fi + + # Unable to determine version + return 1 +} +``` + +### Main Script Updates + +```bash +#!/bin/bash +# Main intent script + +# Detect if called as 'stp' for compatibility +SCRIPT_NAME=$(basename "$0") +if [ "$SCRIPT_NAME" = "stp" ]; then + COMPAT_MODE=true +else + COMPAT_MODE=false +fi + +# Load configuration +source "$(dirname "$0")/intent_config" +load_intent_config + +# Version +VERSION="2.0.0" + +# Command routing +case "$1" in + bootstrap) + shift + exec "$INTENT_ROOT/bin/intent_bootstrap" "$@" + ;; + doctor) + shift + exec "$INTENT_ROOT/bin/intent_doctor" "$@" + ;; + init) + shift + exec "$INTENT_ROOT/bin/intent_init" "$@" + ;; + st|steel-thread) + shift + exec "$INTENT_ROOT/bin/intent_st" "$@" + ;; + upgrade) + shift + exec "$INTENT_ROOT/bin/intent_upgrade" "$@" + ;; + # ... 
other commands + *) + if [ "$COMPAT_MODE" = true ]; then + echo "Note: 'stp' command is deprecated, please use 'intent'" + fi + show_help + ;; +esac +``` + +### Path Updates for All Commands + +Each command needs updates like: + +```bash +# Before +ST_DIR="$PROJECT_ROOT/stp/prj/st" + +# After +ST_DIR="$PROJECT_ROOT/$INTENT_DIR/st" +``` + +### Testing Implementation + +Create test fixtures for each version: + +- `examples/v0.0.0-project/` - Ancient .stp-config format +- `examples/v1.2.0-project/` - File-based steel threads +- `examples/v1.2.1-project/` - Directory-based steel threads +- `examples/hello-world/` - New v2.0.0 structure + +Run comprehensive tests before release: + +```bash +./tests/run_upgrade_tests.sh +``` diff --git a/intent/st/COMPLETED/ST0016/info.md b/intent/st/COMPLETED/ST0016/info.md new file mode 100644 index 0000000..42a365b --- /dev/null +++ b/intent/st/COMPLETED/ST0016/info.md @@ -0,0 +1,67 @@ +--- +verblock: "16 Jul 2025:v0.2: Matthew Sinclair - Updated with JSON config and new commands" +intent_version: 2.0.0 +status: Completed +created: 20250311 +completed: 20250727 +--- +# ST0016: Rename STP CLI to INTENT (v2.0.0) + +## Objective + +Major refactoring to rename STP to INTENT with clean separation of concerns: + +- Tool executables move to top-level bin/ +- Project artifacts in intent/ (flattened structure) +- Robust upgrade path with comprehensive testing +- Full backwards compatibility +- New `intent bootstrap` command for easy setup +- New `intent doctor` command for diagnostics + +## Context + +The STP project has evolved significantly, and we've identified architectural issues that conflate tool executables with project artifacts. This refactoring addresses these concerns by: + +1. **Separating tool from usage**: Moving executables to top-level bin/ while keeping project artifacts in intent/ +2. **Flattening structure**: Removing unnecessary nesting (prj/st/ becomes st/) +3. 
**Modern configuration**: JSON-based local/global config system +4. **Robust migration**: Comprehensive upgrade command with testing (fail-forward approach) +5. **Clear naming**: "intent" better reflects the tool's purpose while maintaining ST#### methodology + +Key architectural improvements: + +- Clean separation between the intent tool (bin/, lib/) and its usage (intent/, backlog/) +- Configurable directory names via JSON configuration +- Full backwards compatibility for existing STP projects +- Model project for testing and examples + +## Related Steel Threads + +- ST0001: Directory Structure (established initial structure) +- ST0014: Directory Structure for Steel Threads (introduced directory-based STs) +- ST0015: Enhanced Steel Thread Templates (future enhancements) + +## Context for LLM + +This is a major version 2.0.0 refactoring that renames the CLI from "stp" to "intent" while maintaining full backwards compatibility. The Steel Thread Process methodology remains unchanged (ST#### numbering continues). + +Key points: + +1. The tool repository itself uses intent on itself (meta usage) +2. bin/ and lib/ are tool components, not project artifacts +3. intent/ and backlog/ are project artifacts from using the tool +4. .intent/config.json is local config, ~/.config/intent/config.json is global (XDG standard) +5. Comprehensive testing via model projects before release + +### Implementation Phases + +0. **Test Infrastructure**: Create all example projects and test suite FIRST +1. **New Commands**: Implement `intent bootstrap` and `intent doctor` +2. **Configuration System**: JSON-based local/global configs with proper loading hierarchy +3. **Repository Restructuring**: Move executables and flatten directories +4. **Migration Implementation**: Robust upgrade command with backup (no rollback - fail forward) +5. **Command Updates**: Update all subcommands for new structure +6. **Documentation**: Update all references and guides +7. 
**Bootstrap & Release**: Final testing and v2.0.0 release + +See design.md for detailed phase descriptions and impl.md for technical implementation details. diff --git a/intent/st/COMPLETED/ST0016/phase0_summary.md b/intent/st/COMPLETED/ST0016/phase0_summary.md new file mode 100644 index 0000000..2353a68 --- /dev/null +++ b/intent/st/COMPLETED/ST0016/phase0_summary.md @@ -0,0 +1,114 @@ +# ST0016: Phase 0 Completion Summary + +## Overview + +Phase 0 (Test Infrastructure) has been completed successfully. This foundation ensures we can validate each subsequent phase of the Intent v2.0.0 implementation. + +## Completed Items + +### 1. Example Projects Created + +Location: `/Users/matts/Devel/prj/STP/examples/` + +#### v0.0.0-project + +- Ancient format with `.stp-config` YAML file +- File-based steel threads without frontmatter +- Represents the oldest supported version + +#### v1.2.0-project + +- Uses `stp/.config/version` for version tracking +- File-based steel threads with YAML frontmatter +- Section markers in steel_threads.md + +#### v1.2.1-project + +- Directory-based steel threads (ST####/info.md structure) +- Enhanced metadata with verblock +- Current production version + +#### hello-world + +- Target v2.0.0 structure +- JSON configuration (`.intent/config.json`) +- Flattened directories (intent/st/ not stp/prj/st/) +- Clean separation of tool vs project artifacts + +### 2. Test Suites Created + +Location: `/Users/matts/Devel/prj/STP/stp/tests/` + +#### upgrade/comprehensive_test.bats + +- Version detection tests for all formats +- Migration scenario tests (v0.0.0 → v2.0.0, etc.) 
+- Backup creation validation +- Dry-run mode testing +- Error handling tests + +#### intent/intent_bootstrap_test.bats + +- Global config directory creation +- Default config.json generation +- INTENT_HOME detection +- PATH setup instructions +- Doctor integration + +#### intent/intent_doctor_test.bats + +- Environment variable checks +- Executable validation +- Config file syntax validation +- PATH verification +- --fix mode testing + +#### intent/json_config_test.bats + +- JSON parsing with sed/grep +- Config loading hierarchy +- Environment variable overrides +- Special character handling + +### 3. Backlog.md Integration + +Created 12 tasks for ST0016: + +- task-59: Create examples directory structure +- task-60: Create v0.0.0 example project +- task-61: Create v1.2.0 example project +- task-62: Create v1.2.1 example project +- task-63: Create hello-world v2.0.0 project +- task-64: Write comprehensive BATS tests +- task-65: Implement intent_bootstrap in top-level bin +- task-66: Implement intent_doctor in top-level bin +- task-67: Create JSON config parser in top-level bin +- task-68: Implement intent_upgrade for migrations +- task-69: Test migrations on example projects +- task-70: Execute self-migration to new structure + +## Next Steps + +Ready to begin Phase 1: New Commands Implementation + +1. Implement intent_bootstrap command +2. Implement intent_doctor command +3. Create shared JSON config parser + +## Key Insights + +1. **Test Coverage**: We have comprehensive tests ready for all major components +2. **Migration Paths**: Clear examples of each version make migration logic straightforward +3. **No External Dependencies**: JSON parsing with sed/grep avoids jq dependency +4. 
**Fail-Forward Approach**: No rollback needed, tests ensure we get it right + +## Files Created/Modified + +- Created: `/examples/` directory with 4 example projects +- Created: Multiple test files in `stp/tests/` +- Updated: `ST0016/results.md` with Phase 0 progress +- Created: 12 Backlog.md tasks for tracking + +## Time Spent + +Phase 0 completed in single session, establishing solid foundation for implementation phases. diff --git a/intent/st/COMPLETED/ST0016/phase1_plan.md b/intent/st/COMPLETED/ST0016/phase1_plan.md new file mode 100644 index 0000000..df6df2c --- /dev/null +++ b/intent/st/COMPLETED/ST0016/phase1_plan.md @@ -0,0 +1,149 @@ +# Phase 1: New Commands Implementation Plan + +## Overview + +Phase 1 focuses on implementing the new Intent v2.0.0 commands in the top-level `bin/` directory. These commands provide essential functionality for the new architecture. + +## Objectives + +1. Create top-level `bin/` directory structure +2. Implement `intent_bootstrap` command for initial setup +3. Implement `intent_doctor` command for diagnostics +4. Create shared `intent_config` library for JSON parsing +5. Ensure all commands work without external dependencies + +## Task Breakdown + +### 1. Create Directory Structure + +```bash +mkdir -p /Users/matts/Devel/prj/STP/bin +mkdir -p /Users/matts/Devel/prj/STP/lib +``` + +### 2. Implement intent_config Library + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_config` + +This shared library will provide: +- `parse_json()` function using sed/grep (no jq dependency) +- `load_intent_config()` function for config hierarchy +- `find_project_root()` function for project detection +- Common variables and defaults + +Key features: +- Parse JSON without external tools +- Handle global → local → environment variable precedence +- Support legacy STP project detection +- Export configuration for use by other commands + +### 3. 
Implement intent_bootstrap Command + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_bootstrap` + +Functionality: +1. **Auto-detect INTENT_HOME**: + - If not set, crawl up from script location + - Look for bin/intent and lib/ directory + - Validate the installation + +2. **Create global config**: + - Create `~/.config/intent/` directory (XDG standard) + - Generate default `config.json` if not exists + - Use current user and editor from environment + +3. **PATH setup**: + - Display clear instructions for shell configuration + - Show export commands for INTENT_HOME and PATH + +4. **Verification**: + - Run `intent doctor` to verify setup + - Display success message + +### 4. Implement intent_doctor Command + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_doctor` + +Checks to perform: +1. **INTENT_HOME**: Set and valid directory exists +2. **Executables**: intent binary exists and is executable +3. **Global config**: Exists and has valid JSON syntax +4. **Local config**: If in project, check syntax +5. **PATH**: Verify $INTENT_HOME/bin is in PATH +6. **Permissions**: Check file permissions +7. **Dependencies**: Verify required tools (bash, sed, grep) + +Features: +- Normal mode: Report issues +- `--fix` mode: Attempt automatic repairs +- Summary with error/warning counts +- Exit codes: 0 for success, 1+ for errors + +### 5. Create Compatibility Wrapper + +**File**: `/Users/matts/Devel/prj/STP/bin/intent` + +This will be a copy of the current `stp` script, modified to: +- Load the new config system +- Detect if called as 'stp' for compatibility warnings +- Route to intent_* subcommands +- Support both old and new project structures during transition + +### 6. Testing Strategy + +After implementing each command: + +1. **Unit tests**: Run the BATS tests we created in Phase 0 +2. **Integration tests**: + - Test bootstrap on clean system + - Test doctor with various configurations + - Test config loading hierarchy +3. 
**Example project tests**: + - Verify commands work with v2.0.0 hello-world project + - Ensure legacy detection works with older examples + +## Implementation Order + +1. **intent_config** (foundation for other commands) +2. **intent_bootstrap** (needed for initial setup) +3. **intent_doctor** (validates bootstrap worked) +4. **intent** (main wrapper, minimal changes from stp) + +## File Permissions + +All executables will need: +```bash +chmod +x /Users/matts/Devel/prj/STP/bin/intent* +``` + +## Success Criteria + +1. ✓ Bootstrap creates valid global config +2. ✓ Doctor correctly identifies all issues +3. ✓ Doctor --fix repairs common problems +4. ✓ Config loading respects hierarchy +5. ✓ JSON parsing works without jq +6. ✓ All BATS tests pass +7. ✓ Commands work on example projects + +## Risk Mitigation + +1. **No external dependencies**: Use only bash built-ins and standard Unix tools +2. **Backwards compatibility**: Detect legacy structures +3. **Clear error messages**: Help users understand issues +4. **Atomic operations**: Don't leave system in broken state +5. 
**Backup before modify**: Doctor --fix backs up files + +## Notes + +- These commands will initially coexist with stp/bin/* commands +- The actual migration (moving stp/bin/* to bin/) happens in Phase 3 +- Focus on getting the new commands working perfectly first +- Use the implementation details from ST0016/impl.md as reference + +## Next Steps After Phase 1 + +Once these commands are working: +- Phase 2: Implement full configuration system +- Phase 3: Repository restructuring with intent_upgrade +- Phase 4: Update all existing commands \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0016/phase1_summary.md b/intent/st/COMPLETED/ST0016/phase1_summary.md new file mode 100644 index 0000000..b4d73ed --- /dev/null +++ b/intent/st/COMPLETED/ST0016/phase1_summary.md @@ -0,0 +1,110 @@ +# ST0016: Phase 1 Completion Summary + +## Overview + +Phase 1 (New Commands Implementation) has been completed successfully. All new Intent v2.0.0 commands are working in the top-level `/bin/` directory. + +## Completed Items + +### 1. Directory Structure +- Created `/Users/matts/Devel/prj/STP/bin/` (top-level executables) +- Created `/Users/matts/Devel/prj/STP/lib/` (for future templates) + +### 2. Core Library: intent_config +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_config` + +Features implemented: +- JSON parsing using `jq` (simplified from regex approach) +- Configuration loading hierarchy (global → local → environment) +- Project root detection (supports v0.0.0, v1.2.0, v1.2.1, and v2.0.0) +- Legacy project support (auto-detects stp directory) +- Configuration validation +- Shared functions for all intent commands + +### 3. 
Bootstrap Command: intent_bootstrap +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_bootstrap` + +Features implemented: +- Auto-detects INTENT_HOME from script location +- Creates `~/.config/intent/config.json` (XDG standard) +- Provides clear PATH setup instructions +- Supports --force to recreate config +- Runs doctor to verify setup +- Handles existing configs gracefully + +### 4. Doctor Command: intent_doctor +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_doctor` + +Features implemented: +- Checks INTENT_HOME environment +- Validates intent executable +- Verifies JSON configuration syntax +- Checks PATH includes intent/bin +- Validates required tools (including jq) +- File permission checks (verbose mode) +- --fix mode for automatic repairs +- Clear error/warning reporting with counts + +### 5. Main Wrapper: intent +**Location**: `/Users/matts/Devel/prj/STP/bin/intent` + +Features implemented: +- Minimal wrapper for Phase 1 testing +- Routes to bootstrap and doctor commands +- Version reporting (2.0.0-alpha) +- Help system +- Ready for expansion in later phases + +## Key Design Decision: Using jq + +After initially implementing regex-based JSON parsing, we switched to requiring `jq` as a dependency. 
This decision: +- Simplifies code significantly +- Provides robust JSON handling +- Follows the same pattern as Backlog.md dependency +- Doctor checks for jq and provides installation instructions + +## Testing Results + +### Command Tests +✅ `intent version` - Shows version 2.0.0-alpha +✅ `intent help` - Displays usage information +✅ `intent bootstrap` - Creates global config successfully +✅ `intent doctor` - Validates configuration correctly +✅ `intent doctor --verbose` - Shows detailed information +✅ `intent doctor --fix` - Can repair issues + +### Configuration Tests +✅ Global config loaded from `~/.config/intent/config.json` +✅ Local config overrides global settings +✅ Legacy project detection (STP directory) +✅ v2.0.0 project config (hello-world example) + +## Next Steps + +Ready for Phase 2: Configuration System +- Enhance config loading for all commands +- Implement project initialization with new structure +- Prepare for migration implementation + +## Files Created/Modified + +### Created +- `/bin/intent` - Main command wrapper +- `/bin/intent_bootstrap` - Setup command +- `/bin/intent_doctor` - Diagnostic command +- `/bin/intent_config` - Shared configuration library + +### Modified +- Updated to use `jq` for JSON parsing throughout + +## Key Insights + +1. **jq Dependency**: Much cleaner than regex parsing +2. **Doctor Command**: Essential for troubleshooting +3. **Config Hierarchy**: Works well for global/local settings +4. **Legacy Support**: Auto-detection helps transition +5. **Top-level bin/**: Clear separation from project artifacts + +## Time Spent + +Phase 1 completed in single session, with mid-course correction to use jq instead of regex parsing. 
\ No newline at end of file diff --git a/intent/st/COMPLETED/ST0016/phase2_plan.md b/intent/st/COMPLETED/ST0016/phase2_plan.md new file mode 100644 index 0000000..bfdd47f --- /dev/null +++ b/intent/st/COMPLETED/ST0016/phase2_plan.md @@ -0,0 +1,164 @@ +# Phase 2: Migration Implementation Plan + +## Overview + +Phase 2 focuses on implementing the migration functionality that will transform projects from any STP version to Intent v2.0.0. This includes the upgrade command and initial project initialization. + +## Objectives + +1. Implement `intent_init` for creating new v2.0.0 projects +2. Implement `intent_upgrade` for migrating existing projects +3. Create version detection logic with clear error handling +4. Implement backup mechanism +5. Build migration logic for each version +6. Test migrations on example projects + +## Task Breakdown + +### 1. Implement intent_init Command + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_init` + +This command creates a new Intent v2.0.0 project structure: +- Create `.intent/config.json` with project settings +- Create `intent/` directory structure (flattened) +- Create `backlog/` directory with config.yml +- Initialize first steel thread (optional) +- Set up `.gitignore` appropriately + +Key features: +- Use loaded configuration for defaults +- Allow customization via flags +- Create clean v2.0.0 structure (no legacy) +- Integration with Backlog.md + +### 2. 
Implement intent_upgrade Command + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_upgrade` + +This is the core migration command that handles all version upgrades: + +#### 2.1 Version Detection +```bash +detect_stp_version() { + # Check .intent/config.json (v2.0.0) + # Check stp/.config/version (v1.2.0+) + # Check .stp-config (v0.0.0) + # Check directory structure patterns + # Return version or error +} +``` + +#### 2.2 Backup Creation +```bash +create_backup() { + local backup_dir=".backup_$(date +%Y%m%d_%H%M%S)" + # Copy all relevant directories + # Create manifest of backed up files + # Return backup location +} +``` + +#### 2.3 Migration Functions +```bash +migrate_v0_0_0_to_v2_0_0() { + # Convert .stp-config to .intent/config.json + # Move stp/prj/st/* to intent/st/ + # Flatten directory structure + # Update file metadata +} + +migrate_v1_2_0_to_v2_0_0() { + # Convert YAML configs to JSON + # Move file-based steel threads + # Update directory structure +} + +migrate_v1_2_1_to_v2_0_0() { + # Move directory-based steel threads + # Convert configs to JSON + # Update frontmatter +} +``` + +#### 2.4 Command Options +- `--dry-run`: Show what would be done without changes +- `--yes`: Skip confirmation prompts +- `--verbose`: Show detailed progress +- `--backup-only`: Create backup without migrating + +### 3. Update intent Main Script + +**File**: `/Users/matts/Devel/prj/STP/bin/intent` + +Add routing for new commands: +- `intent init [project-name]` +- `intent upgrade [options]` + +### 4. Create Helper Functions + +**File**: `/Users/matts/Devel/prj/STP/bin/intent_helpers` + +Shared functions for migration: +- `convert_yaml_to_json()` - Convert YAML frontmatter +- `update_frontmatter()` - Change stp_version to intent_version +- `flatten_directory()` - Remove prj/ nesting +- `create_directory_structure()` - Standard v2.0.0 layout + +### 5. 
Testing Strategy + +#### 5.1 Test intent_init +- Create new project in temp directory +- Verify all directories created +- Check config.json is valid +- Ensure backlog integration works + +#### 5.2 Test intent_upgrade +For each example project (v0.0.0, v1.2.0, v1.2.1): +1. Copy to temp directory +2. Run upgrade +3. Verify: + - Backup created + - Files moved correctly + - Configs converted to JSON + - No data lost + - Commands work post-migration + +#### 5.3 Edge Cases +- Empty projects +- Projects with custom structures +- Projects with invalid configs +- Interrupted migrations + +## Implementation Order + +1. **intent_helpers** - Shared functions +2. **intent_init** - New project creation +3. **intent_upgrade** - Migration command +4. **Update intent** - Add new command routing +5. **Test with examples** - Verify all migrations work + +## Success Criteria + +1. ✓ New projects created with clean v2.0.0 structure +2. ✓ All example projects migrate successfully +3. ✓ Backups created before any changes +4. ✓ Clear error messages for unknown versions +5. ✓ No data loss during migration +6. ✓ Dry-run mode shows accurate preview +7. ✓ All tests pass + +## Risk Mitigation + +1. **Comprehensive Backups**: Always create timestamped backup +2. **Dry Run First**: Allow preview before changes +3. **Version Detection**: Fail clearly if version unknown +4. **Atomic Operations**: Use temp files, then move +5. 
**Test Coverage**: Test each migration path thoroughly + +## Notes + +- This phase prepares for the actual repository restructuring in Phase 3 +- We're building the tools that will perform the migration +- The commands work with the current structure but prepare for the new one +- Focus on getting migrations working perfectly before moving files \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0016/phase2_summary.md b/intent/st/COMPLETED/ST0016/phase2_summary.md new file mode 100644 index 0000000..11d3ff0 --- /dev/null +++ b/intent/st/COMPLETED/ST0016/phase2_summary.md @@ -0,0 +1,123 @@ +# ST0016: Phase 2 Completion Summary + +## Overview + +Phase 2 (Migration Implementation) has been completed successfully. We now have fully functional `init` and `upgrade` commands that can create new Intent v2.0.0 projects and migrate existing STP projects. + +## Completed Items + +### 1. Helper Functions: intent_helpers +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_helpers` + +Shared utilities implemented: +- `convert_yaml_frontmatter()` - Convert YAML to v2.0.0 format +- `update_version_in_frontmatter()` - Change stp_version to intent_version +- `convert_yaml_config_to_json()` - Handle .stp-config conversion +- `create_v2_directory_structure()` - Standard directory layout +- `flatten_directory_structure()` - Remove prj/ nesting +- `detect_project_version()` - Identify STP versions +- `create_project_backup()` - Timestamped backups +- Migration helpers for counting files and showing summaries + +### 2. Init Command: intent_init +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_init` + +Features implemented: +- Create new Intent v2.0.0 projects +- Clean directory structure (no legacy) +- JSON configuration from the start +- Git initialization (optional) +- Backlog.md integration +- --with-st flag creates first steel thread +- Proper error handling for existing projects + +### 3. 
Upgrade Command: intent_upgrade +**Location**: `/Users/matts/Devel/prj/STP/bin/intent_upgrade` + +Features implemented: +- Detect all STP versions (0.0.0, 1.x, 1.2.0, 1.2.1) +- Clear error messages for unknown versions +- Timestamped backups before migration +- Version-specific migration logic: + - v0.0.0: Convert .stp-config, flatten deeply nested structure + - v1.2.0: Create JSON config, flatten directories + - v1.2.1: Same as v1.2.0 (handles directory-based STs) +- Options: + - --dry-run: Preview without changes + - --yes: Skip confirmation + - --verbose: Detailed progress + - --backup-only: Just create backup + - --no-backup: Skip backup (dangerous) +- Clean up empty directories after migration + +### 4. Main Script Updates +**Location**: `/Users/matts/Devel/prj/STP/bin/intent` + +- Added routing for `init` and `upgrade` commands +- Updated help text + +## Testing Results + +### Intent Init Test +✅ Created new project with v2.0.0 structure +✅ Generated valid JSON config +✅ Created steel thread with --with-st +✅ Proper .gitignore created +✅ Backlog.md integration worked + +### Intent Upgrade Tests +✅ v0.0.0 → v2.0.0 migration successful +✅ Dry-run mode showed accurate preview +✅ .stp-config converted to JSON correctly +✅ Directory structure flattened properly +✅ Backups created with timestamp +✅ Verbose mode provided detailed output + +### Example Migration (v0.0.0) +``` +Before: + .stp-config + stp/prj/st/ST0001.md + stp/prj/st/ST0002.md + +After: + .intent/config.json + intent/st/ST0001.md + intent/st/ST0002.md +``` + +## Key Design Decisions + +1. **Fail-Forward Approach**: No rollback mechanism, but comprehensive backups +2. **Clear Error Messages**: Unknown versions fail with helpful diagnostics +3. **Version Detection**: Multiple strategies to identify project version +4. **Atomic Operations**: Use temp files and moves where possible +5. **Preserve Data**: All content migrated, nothing lost + +## Issues Fixed + +1. 
Small output formatting issue with backup messages (cosmetic) +2. All core functionality working correctly + +## Next Steps + +Ready for Phase 3: Repository Restructuring +- Move executables from stp/bin/* to bin/* +- Rename all commands from stp_* to intent_* +- Create compatibility symlinks +- Update all command implementations +- Perform self-migration on the STP project itself + +## Files Created/Modified + +### Created +- `/bin/intent_helpers` - Shared migration utilities +- `/bin/intent_init` - New project initialization +- `/bin/intent_upgrade` - Migration command + +### Modified +- `/bin/intent` - Added new command routing + +## Time Spent + +Phase 2 completed in single session, building on Phase 1 foundation with jq-based configuration. \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0016/tasks.md b/intent/st/COMPLETED/ST0016/tasks.md new file mode 100644 index 0000000..d96e875 --- /dev/null +++ b/intent/st/COMPLETED/ST0016/tasks.md @@ -0,0 +1,126 @@ +--- +verblock: "16 Jul 2025:v0.2: Matthew Sinclair - Updated with Phase 0 and new commands" +stp_version: 2.0.0 +--- +# ST0016: Task Tracking + +## Overview + +Task tracking for ST0016 is managed through Backlog.md integration. 
Use the following commands to create and track tasks: + +```bash +# Create tasks for this steel thread +intent task create ST0016 "Create test infrastructure" +intent task create ST0016 "Implement config loading system" +intent task create ST0016 "Build upgrade command" + +# View all tasks +intent task list ST0016 + +# Or use backlog directly +intent bl list | grep ST0016 +``` + +## High-Level Task Breakdown + +### Phase 0: Test Infrastructure (CRITICAL - DO FIRST) + +- [ ] Create examples/v0.0.0-project with .stp-config format +- [ ] Create examples/v1.2.0-project with file-based STs +- [ ] Create examples/v1.2.1-project with directory-based STs +- [ ] Create examples/hello-world with v2.0.0 structure +- [ ] Write comprehensive BATS upgrade test suite +- [ ] Create test harness for migration scenarios +- [ ] Document expected test outcomes +- [ ] Setup CI/CD for automated testing + +### Phase 1: New Commands + +- [ ] Implement intent bootstrap command +- [ ] Implement intent doctor command +- [ ] Add bootstrap detection logic for INTENT_HOME +- [ ] Create doctor diagnostic checks +- [ ] Add --fix mode for doctor +- [ ] Write tests for new commands + +### Phase 2: Configuration System + +- [ ] Implement JSON parser for shell +- [ ] Create config loading functions +- [ ] Add project root detection logic +- [ ] Implement config overlay (global → local → env) +- [ ] Use ~/.config/intent/ for global config (XDG) +- [ ] Write comprehensive config tests + +### Phase 3: Repository Restructuring + +- [ ] Move stp/bin/* to bin/ +- [ ] Rename executables (stp → intent, stp_*→ intent_*) +- [ ] Create backwards compatibility symlinks +- [ ] Move stp/_templ/ to lib/templates/ +- [ ] Update all template references +- [ ] Test executable paths + +### Phase 4: Upgrade Command + +- [ ] Implement version detection with error handling +- [ ] Add clear error messages for unknown versions +- [ ] Create backup mechanism +- [ ] Build migration logic for each version +- [ ] Add dry-run 
mode +- [ ] Add progress reporting +- [ ] Convert configs to JSON format +- [ ] Update documentation files +- [ ] Write comprehensive upgrade tests + +### Phase 5: Command Updates + +- [ ] Update main intent script +- [ ] Update intent_init for new structure +- [ ] Update intent_st for flattened paths +- [ ] Update all other subcommands +- [ ] Add deprecation warnings for stp usage +- [ ] Test all commands with both structures + +### Phase 6: Documentation + +- [ ] Update README.md +- [ ] Create migration guide +- [ ] Update all command documentation +- [ ] Create troubleshooting guide +- [ ] Update examples and tutorials +- [ ] Write release notes + +### Phase 7: Release Preparation + +- [ ] Run full test suite +- [ ] Test on real projects (with backups) +- [ ] Create release branch +- [ ] Tag v2.0.0 +- [ ] Prepare announcement +- [ ] Update website/docs + +### Phase 8: Bootstrap & Installation + +- [ ] Update installation instructions +- [ ] Create bootstrap script documentation +- [ ] Test new user flow +- [ ] Update CI/CD pipeline +- [ ] Create getting started guide +- [ ] Test PATH setup instructions + +## Task Management + +When starting work: + +1. Create detailed tasks in Backlog.md for current phase +2. Link tasks to ST0016 +3. Update task status as work progresses +4. Document any issues or decisions in this steel thread + +## Dependencies + +- Requires bash 3.2+ (standard on most systems) +- Requires standard Unix tools (sed, awk, grep) +- No external dependencies for core functionality +- JSON parsing done with sed/grep (no jq required) diff --git a/intent/st/COMPLETED/ST0017/design.md b/intent/st/COMPLETED/ST0017/design.md new file mode 100644 index 0000000..d6be3f0 --- /dev/null +++ b/intent/st/COMPLETED/ST0017/design.md @@ -0,0 +1,194 @@ +# Design - ST0017: Add an Intent sub-agent for Claude Code to Intent + +## Approach + +Implement a sync-based agent management system that: + +1. 
**Maintains agents within Intent's structure** for version control and distribution +2. **Syncs agents to Claude Code's configuration** when needed +3. **Tracks installation state** using manifest files +4. **Supports both global and project-specific agents** + +The system will integrate seamlessly with existing Intent commands and respect the separation between Intent core (global) and project-specific configurations. + +## Design Decisions + +### 1. Sync vs Symlinks + +**Decision**: Use file sync instead of symbolic links +**Rationale**: + +- Claude Code's symlink support is uncertain +- Cross-platform compatibility (Windows symlinks differ) +- Explicit sync provides validation opportunity +- More predictable behaviour + +### 2. Manifest-Based Tracking + +**Decision**: Use JSON manifests to track agent state +**Rationale**: + +- Clear record of what's installed vs available +- Enables clean uninstall of Intent-managed agents +- Supports modification detection via checksums +- Allows versioning and updates + +### 3. Dual-Level Agent System + +**Decision**: Support both global (Intent-wide) and local (project-specific) agents +**Rationale**: + +- Global agents ship with Intent (intent, elixir) +- Projects can define custom agents +- Clear separation of concerns +- Flexible deployment options + +### 4. Agent Structure + +**Decision**: Agents as directories with metadata +**Rationale**: + +- Richer than single markdown files +- Supports versioning and dependencies +- Enables future extensions +- Clear organization + +## Architecture + +### Directory Structure + +``` +$INTENT_HOME/ # Global Intent installation +├── agents/ # Global agents repository +│ ├── .manifest/ +│ │ └── global-agents.json # Available global agents +│ ├── intent/ +│ │ ├── agent.md # Claude sub-agent definition +│ │ └── metadata.json # Version, description, etc. 
+│ └── elixir/ +│ ├── agent.md +│ └── metadata.json + +$PROJECT_DIR/ # User's project +├── intent/ +│ └── agents/ # Project-specific agents +│ ├── .manifest/ +│ │ └── installed-agents.json # Tracks installations +│ └── custom-agent/ +│ ├── agent.md +│ └── metadata.json +└── .claude/ + └── agents/ # Claude Code reads from here + ├── intent.md # Synced from global + └── custom-agent.md # Synced from local +``` + +### Manifest Schemas + +#### Global Agents Manifest + +```json +{ + "version": "1.0.0", + "agents": [ + { + "name": "intent", + "version": "2.1.0", + "description": "Intent-aware development assistant", + "path": "intent", + "checksum": "sha256:abc123...", + "tools": ["Bash", "Read", "Write", "Edit"], + "min_intent_version": "2.1.0" + } + ] +} +``` + +#### Installed Agents Manifest + +```json +{ + "version": "1.0.0", + "project": "my-project", + "installed": [ + { + "name": "intent", + "source": "global", + "source_path": "$INTENT_HOME/agents/intent", + "version": "2.1.0", + "installed_at": "2025-01-27T10:00:00Z", + "checksum": "sha256:abc123...", + "modified": false + } + ] +} +``` + +### Command Structure + +```bash +# Core commands +intent agents list # Show available and installed agents +intent agents install # Install agent(s) to Claude config +intent agents sync # Update modified agents +intent agents uninstall # Remove Intent-managed agents +intent agents show # Display agent details + +# Additional commands +intent agents status # Check installation health +intent agents update # Update to newer versions +``` + +### Integration Flow + +``` +1. Developer runs: intent agents install intent + ↓ +2. Intent reads: $INTENT_HOME/agents/.manifest/global-agents.json + ↓ +3. Copies: $INTENT_HOME/agents/intent/agent.md + ↓ +4. To: $PROJECT_DIR/.claude/agents/intent.md + ↓ +5. Updates: $PROJECT_DIR/intent/agents/.manifest/installed-agents.json + ↓ +6. Claude Code can now use the Intent sub-agent +``` + +## Alternatives Considered + +### 1. 
Direct .claude Management + +**Approach**: Let users manually copy agents to .claude/agents/ +**Rejected because**: + +- No tracking of Intent-managed vs user agents +- No update mechanism +- Poor user experience + +### 2. Symbolic Links + +**Approach**: Symlink from .claude/agents/ to intent/agents/ +**Rejected because**: + +- Uncertain Claude Code support +- Platform compatibility issues +- Security concerns + +### 3. Single Global Manifest + +**Approach**: One manifest in Intent core tracking all projects +**Rejected because**: + +- Violates project isolation +- Intent core shouldn't track project state +- Scaling issues + +### 4. Embedding in Intent Binary + +**Approach**: Include agents directly in Intent commands +**Rejected because**: + +- Not how Claude sub-agents work +- Would require Intent to act as intermediary +- Loses benefits of Claude's agent system diff --git a/intent/st/COMPLETED/ST0017/impl.md b/intent/st/COMPLETED/ST0017/impl.md new file mode 100644 index 0000000..601220a --- /dev/null +++ b/intent/st/COMPLETED/ST0017/impl.md @@ -0,0 +1,251 @@ +# Implementation - ST0017: Add an Intent sub-agent for Claude Code to Intent + +## Implementation Plan + +### Phase 1: Infrastructure (Days 1-2) +1. Create directory structures + - Add `agents/` to Intent core + - Create `.manifest/` subdirectories + - Set up project agent locations + +2. Implement manifest management + - JSON parsing/writing functions + - Checksum calculation + - Manifest validation + +3. Add Claude Code detection + - Check for `.claude` directory + - Verify `claude` command availability + - Handle missing Claude gracefully + +### Phase 2: Core Commands (Days 3-4) +1. Implement `intent_agents` base command + - Command routing + - Help system integration + - Error handling framework + +2. 
Build core subcommands + - `list` - Show available/installed agents + - `install` - Copy agents to Claude config + - `sync` - Update modified agents + - `uninstall` - Remove managed agents + - `show` - Display agent details + +### Phase 3: Agent Development (Days 5-6) +1. Create Intent sub-agent + - System prompt for Intent awareness + - Steel thread methodology knowledge + - Command reference + +2. Create Elixir sub-agent + - Elixir best practices + - Usage rules integration + - Functional programming focus + +### Phase 4: Integration & Testing (Days 7-8) +1. Integration with existing commands + - Auto-install on `intent init` + - Doctor command checks + - Help system updates + +2. Comprehensive testing + - Unit tests for manifest operations + - Integration tests for sync + - End-to-end workflow tests + +## Technical Details + +### Command Implementation Structure + +```bash +# intent_agents main command +#!/bin/bash +source "$INTENT_BIN/intent_helpers" + +case "$1" in + list) shift; intent_agents_list "$@" ;; + install) shift; intent_agents_install "$@" ;; + sync) shift; intent_agents_sync "$@" ;; + uninstall) shift; intent_agents_uninstall "$@" ;; + show) shift; intent_agents_show "$@" ;; + status) shift; intent_agents_status "$@" ;; + *) intent_agents_help ;; +esac +``` + +### Manifest Operations + +```bash +# Read manifest +read_manifest() { + local manifest_file="$1" + if [ -f "$manifest_file" ]; then + cat "$manifest_file" | jq '.' + else + echo '{"version": "1.0.0", "agents": []}' + fi +} + +# Calculate checksum +calculate_checksum() { + local file="$1" + if command -v sha256sum >/dev/null; then + sha256sum "$file" | cut -d' ' -f1 + else + shasum -a 256 "$file" | cut -d' ' -f1 + fi +} +``` + +### Claude Code Detection + +```bash +detect_claude() { + if [ -d "$HOME/.claude" ] || command -v claude >/dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +# Auto-install prompt +if detect_claude && [ "$1" = "init" ]; then + echo "Claude Code detected. 
Install Intent agents? [Y/n]" + read -r response + if [[ "$response" =~ ^[Yy]?$ ]]; then + intent agents install --all + fi +fi +``` + +### Agent Metadata Format + +```json +{ + "name": "intent", + "version": "2.0.0", + "description": "Intent-aware development assistant", + "author": "Intent Contributors", + "tools": ["Bash", "Read", "Write", "Edit", "Grep", "WebFetch"], + "tags": ["project-management", "steel-threads", "development"], + "min_intent_version": "2.0.0", + "min_claude_version": null +} +``` + +### Conflict Resolution + +```bash +handle_conflict() { + local target="$1" + local source="$2" + + if [ -f "$target" ]; then + local target_sum=$(calculate_checksum "$target") + local source_sum=$(calculate_checksum "$source") + + if [ "$target_sum" != "$source_sum" ]; then + echo "Warning: Agent already exists and has been modified" + echo "Target: $target" + echo "[O]verwrite, [S]kip, [D]iff, [B]ackup?" + read -r choice + + case "$choice" in + [Oo]) cp "$source" "$target" ;; + [Ss]) return 1 ;; + [Dd]) diff "$target" "$source" | less ;; + [Bb]) + backup="$target.backup.$(date +%Y%m%d_%H%M%S)" + cp "$target" "$backup" + cp "$source" "$target" + ;; + esac + fi + else + cp "$source" "$target" + fi +} +``` + +## Testing Strategy + +### Unit Tests +- Manifest reading/writing +- Checksum calculation +- Path resolution +- JSON validation + +### Integration Tests +- Agent installation flow +- Sync with modifications +- Uninstall cleanup +- Claude detection + +### End-to-End Tests +```bash +# Test full workflow +test_agent_workflow() { + # Setup + intent init test-project + cd test-project + + # Install + intent agents install intent + assert_file_exists ".claude/agents/intent.md" + + # Verify manifest + assert_json_contains "intent/agents/.manifest/installed-agents.json" \ + '.installed[0].name == "intent"' + + # Sync + touch .claude/agents/intent.md + intent agents sync + + # Uninstall + intent agents uninstall intent + assert_file_not_exists 
".claude/agents/intent.md" +} +``` + +## Rollout Plan + +### Release Strategy +1. **v2.1.0-beta**: Initial release with agent support + - Core commands functional + - Intent and Elixir agents included + - Documentation complete + +2. **v2.1.0**: Stable release + - Bug fixes from beta feedback + - Performance optimizations + - Additional agents based on demand + +### Migration Steps +1. No breaking changes - additive feature +2. Existing projects gain agent commands automatically +3. Optional auto-install on first use +4. Clear documentation in release notes + +### Documentation Updates +- Update main README with agent examples +- Add agents section to user guide +- Create agent development guide +- Include in `intent help` system + +## Challenges & Solutions + +### Challenge 1: Cross-Platform Compatibility +**Issue**: Checksum commands differ between macOS/Linux +**Solution**: Detect available command and use appropriate syntax + +### Challenge 2: Claude Installation Variations +**Issue**: Claude might be installed in different ways (homebrew, direct, etc.) 
+**Solution**: Multiple detection methods, graceful fallback + +### Challenge 3: User Customization Preservation +**Issue**: Users might modify agents after installation +**Solution**: Checksum tracking, conflict resolution options + +### Challenge 4: Backwards Compatibility +**Issue**: Need to support projects without agent capability +**Solution**: Additive design, no breaking changes to existing commands \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0017/info.md b/intent/st/COMPLETED/ST0017/info.md new file mode 100644 index 0000000..0f076b3 --- /dev/null +++ b/intent/st/COMPLETED/ST0017/info.md @@ -0,0 +1,64 @@ +--- +verblock: "27 Jul 2025:v0.3: matts - Updated for v2.1.0 with agent init" +intent_version: 2.1.0 +status: Completed +created: 20250727 +completed: 20250727 +--- +# ST0017: Add an Intent sub-agent for Claude Code to Intent + +## Brief to prime Claude Code + +Claude Code has just introduced [Sub Agents](https://docs.anthropic.com/en/docs/claude-code/sub-agents). Please read that now to get up to speed with how it works. + +I want to do three things to update Intent to work with Claude Code's sub-agents. + +1. I want to add a new sub-agent called "Intent" which will act as a sub-agent and help Claude work with Intent-based projects. The sub-agent will know all about what Intent is, how it works, and what needs to happen to set it up, etc. + +2. I want to extend Intent so that it is possible to easily add both global (ie available for all Intent instances) and local (ie only available to the per-project Intent installation). I want a "pluggable sub-agent" capability that is built into Intent so that others can extend it with their own sub-agents. And I want the Intent sub-agent to be the first version of this kind of thing. + +3.
And then I want to build the second Intent sub-agent, which will be an "Elixir Code Doctor" that will know about how to write great Elixir code, as well as integrate with the [Usage Rules](https://www.zachdaniel.dev/p/usage-rules-leveling-the-playing) and [Usage Rules on GitHub](https://hexdocs.pm/usage_rules/readme.html) process and [Ash AI](https://github.com/ash-project/ash_ai/blob/main/usage-rules.md). + +In doing #2, we will build the capability to do #1 (and #3), and then make it a pluggable capability for Intent going forward. + +To help refine the design, I want you to enter "Intellectual Sparring Partner Mode" and help me to refine the design. Once we have a refined design for adding sub-agents to Intent, we can then go about building it. + +A few notes, in no particular order: + +- We will add a new directory called agents and put it here intent/agents +- In agents/ we will have a sub-dir for each of the agents (ie intent/agents/{intent,elixir}) +- We need a way to manage the installed agents for the Intent core installation (easy: git pull from the Intent repo) and then for the local per-project agents (TBC how this works) +- We will need to add a new command "intent agents" with appropriate support +- We will need commands for it such as "intent agents {list,add,delete,show}" and whatever options make sense + +When you're ready, let's dive in and build out a design. + +## Objective + +Create a sub-agent management system for Intent that integrates with Claude Code's sub-agent capability, allowing Intent projects to leverage specialized AI assistants for development tasks while maintaining Intent's architectural principles. + +## Context + +Claude Code recently introduced [sub-agents](https://docs.anthropic.com/en/docs/claude-code/sub-agents) - specialized AI assistants that can handle specific tasks with focused expertise. This presents an opportunity to enhance Intent with: + +1.
**Intent-aware Assistant**: A sub-agent that understands Intent's steel thread methodology, project structure, and commands +2. **Extensible Architecture**: A plugin system allowing users to add domain-specific sub-agents (e.g., Elixir code doctor) +3. **Seamless Integration**: Automatic management of sub-agents within Intent projects + +This will improve developer experience by providing contextual AI assistance that understands both the project management framework (Intent) and specific technical domains. + +## Related Steel Threads + +- None currently - this is a new capability introduced in Intent v2.1.0 (enhanced from v2.0.0) + +## Context for LLM + +This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. + +### How to update this document + +1. Update the status as work progresses +2. Update related documents (design.md, impl.md, etc.) as needed +3. Mark the completion date when finished + +The LLM should assist with implementation details and help maintain this document as work progresses. 
diff --git a/intent/st/COMPLETED/ST0017/tasks.md b/intent/st/COMPLETED/ST0017/tasks.md new file mode 100644 index 0000000..46c727a --- /dev/null +++ b/intent/st/COMPLETED/ST0017/tasks.md @@ -0,0 +1,84 @@ +# Tasks - ST0017: Add an Intent sub-agent for Claude Code to Intent + +## Tasks + +### Phase 1: Infrastructure +- [x] Create agents directory structure in Intent core +- [x] Create agents/.manifest directory for global manifest +- [x] Implement JSON manifest parsing functions in intent_helpers +- [x] Add checksum calculation utility function +- [x] Implement Claude Code detection function +- [x] Create project agent directory template + +### Phase 2: Core Commands +- [x] Create bin/intent_agents main command file +- [x] Implement intent_agents_list subcommand +- [x] Implement intent_agents_install subcommand +- [x] Implement intent_agents_sync subcommand +- [x] Implement intent_agents_uninstall subcommand +- [x] Implement intent_agents_show subcommand +- [x] Implement intent_agents_status subcommand +- [x] Add agents command to main intent router +- [x] Update help system with agents commands + +### Phase 3: Agent Development +- [x] Create agents/intent directory +- [x] Write Intent sub-agent system prompt (agent.md) +- [x] Create Intent agent metadata.json +- [x] Create agents/elixir directory +- [x] Write Elixir sub-agent system prompt (agent.md) +- [x] Create Elixir agent metadata.json +- [x] Create global-agents.json manifest + +### Phase 4: Integration +- [x] Update intent_init to detect Claude and offer agent installation +- [x] Add agent checks to intent_doctor +- [x] Update intent_upgrade to handle agent migration +- [x] Create agent installation documentation +- [ ] Update main README with agent examples + +### Phase 5: Testing +- [x] Write unit tests for manifest operations +- [x] Write unit tests for checksum functions +- [x] Write integration tests for agent installation +- [x] Write integration tests for sync operations +- [x] Write end-to-end 
workflow tests +- [x] Test cross-platform compatibility (macOS/Linux) +- [x] Test Claude detection variations + +### Phase 6: Documentation +- [ ] Write agent development guide +- [x] Document agent metadata format +- [x] Create troubleshooting guide +- [ ] Update release notes for v2.1.0 + +## Task Notes + +### Critical Path Items +1. Manifest infrastructure must be complete before command implementation +2. Agent content should be developed in parallel with commands +3. Testing should begin as soon as basic commands work + +### Testing Considerations +- Use mock Claude installations for CI/CD +- Test both with and without Claude installed +- Verify backwards compatibility with existing projects + +## Dependencies + +### External Dependencies +- `jq` for JSON parsing (already required by Intent) +- `sha256sum` or `shasum` for checksums (standard on all platforms) + +### Internal Dependencies +- Requires Intent v2.1.0 or higher +- Must maintain compatibility with existing command structure +- Should integrate with existing helper functions + +### Sequencing +1. **Infrastructure first**: Manifest and utility functions +2. **Commands second**: Build on infrastructure +3. **Agents parallel**: Can be developed alongside commands +4. **Integration**: After core functionality complete +5. **Testing throughout**: Unit tests as we go, integration tests at end +6. **Documentation last**: Once implementation is stable \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0018/design.md b/intent/st/COMPLETED/ST0018/design.md new file mode 100644 index 0000000..17a0960 --- /dev/null +++ b/intent/st/COMPLETED/ST0018/design.md @@ -0,0 +1,86 @@ +--- +verblock: "15 Aug 2025:v0.1: Torrell Ewan - Design specifications for worker-bee agent" +intent_version: 2.2.0 +--- +# Design - ST0018: Worker-Bee Intent Agent for WDD Architecture Enforcement + +## Approach + +Implement a comprehensive Intent agent specializing in Worker-Bee Driven Design (WDD) through: + +1. 
**Interactive Discovery Pattern**: One-time project structure mapping with persistent storage +2. **Mix Task Integration**: CLI tools for validation, scaffolding, and remapping +3. **Educational Agent**: Claude Code sub-agent providing contextual WDD guidance +4. **Framework Agnostic**: Support for Phoenix, OTP, libraries, Nerves, umbrella projects + +## Design Decisions + +### "Discovery Once" Principle +**Decision**: Agent checks for existing `.wdd_project_map.yaml` before conducting discovery +**Rationale**: Minimizes user interruption while maintaining flexibility for project evolution + +### Mix Task Architecture +**Decision**: Separate tasks for validate, scaffold, and remap operations +**Rationale**: Clear separation of concerns, composable workflows, familiar Elixir patterns + +### EEx Template System +**Decision**: Use Elixir's native EEx templating for code generation +**Rationale**: Leverages existing Elixir tooling, allows customization, maintains consistency + +### YAML Project Maps +**Decision**: Store project structure in `.wdd_project_map.yaml` format +**Rationale**: Human-readable, version-controllable, widely supported format + +## Architecture + +### Agent Layer Structure +``` +worker-bee/ +├── agent.md # Claude Code agent definition +├── metadata.json # Agent configuration +├── USER_GUIDE.md # Complete usage documentation +├── lib/ # Core business logic +│ ├── project_mapper.ex # Discovery and mapping +│ ├── wdd_validator.ex # Compliance validation +│ ├── template_generator.ex # Code scaffolding +│ └── mix/tasks/wdd/ # CLI interface +├── templates/ # EEx generation templates +├── config/ # Validation patterns +└── validation/ # WDD compliance rules +``` + +### WDD 6-Layer Enforcement +1. **Data** - Immutable structures, proper typing +2. **Functions** - Pure business logic, no side effects +3. **Tests** - Behavior-focused, layer-appropriate +4. **Boundaries** - GenServers, APIs, side effect management +5. 
**Lifecycles** - OTP supervision, application structure +6. **Workers** - Concurrency, background processing + +### Validation Engine +- **Pattern-based detection** using configurable rules +- **Scoring system** with layer-specific and overall metrics +- **Smart suggestions** for re-mapping when structure evolves +- **Framework awareness** for context-appropriate validation + +## Alternatives Considered + +### Alternative 1: Macro-based Code Generation +**Rejected**: Would require compile-time dependency, limiting flexibility +**Chosen**: Mix task with EEx templates for runtime generation + +### Alternative 2: Hard-coded Project Structure +**Rejected**: Inflexible for diverse project organizations +**Chosen**: Interactive discovery with persistent mapping + +### Alternative 3: Single Validation Command +**Rejected**: Would create overly complex interface +**Chosen**: Separate tasks for validate, scaffold, remap operations + +### Alternative 4: JSON Project Configuration +**Rejected**: Less human-readable than YAML +**Chosen**: YAML for better developer experience + +### Alternative 5: Framework-specific Agents +**Rejected**: Would fragment WDD knowledge across multiple agents +**Chosen**: Single agent with framework awareness and detection \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0018/impl.md b/intent/st/COMPLETED/ST0018/impl.md new file mode 100644 index 0000000..e1cfb7c --- /dev/null +++ b/intent/st/COMPLETED/ST0018/impl.md @@ -0,0 +1,205 @@ +--- +verblock: "15 Aug 2025:v0.1: Torrell Ewan - Implementation details for worker-bee agent" +intent_version: 2.2.0 +--- +# Implementation - ST0018: Worker-Bee Intent Agent for WDD Architecture Enforcement + +## Implementation + +The worker-bee agent was implemented as a comprehensive WDD specialist with three main components: + +### 1. 
Claude Code Agent Integration +- **Agent Definition**: `agents/worker-bee/agent.md` with comprehensive system prompt +- **Metadata Configuration**: `agents/worker-bee/metadata.json` with tool specifications +- **Installation**: Integrates with Intent's agent management system via `intent agents install worker-bee` + +### 2. Mix Task CLI Interface +Three dedicated Mix tasks provide command-line functionality: +- `mix wdd.validate` - Compliance validation with scoring and detailed feedback +- `mix wdd.scaffold` - Code generation following project conventions +- `mix wdd.remap` - Project structure remapping with backup functionality + +### 3. Supporting Infrastructure +- **Business Logic Modules**: ProjectMapper, WDDValidator, TemplateGenerator +- **EEx Templates**: Code generation templates for all WDD component types +- **Validation Rules**: Pattern-based compliance checking +- **Configuration**: YAML-based validation patterns and project mapping + +## Code Examples + +### Agent System Prompt Structure +```markdown +--- +name: worker-bee +description: Worker-Bee Driven Design specialist for Elixir applications +tools: Bash, Read, Write, Edit, Grep, Glob, LS +--- + +You are a Worker-Bee Driven Design (WDD) specialist... + +**FIRST CHECK**: Always verify if a WDD project map already exists before conducting discovery. 
+``` + +### Project Mapping Discovery +```elixir +defmodule WorkerBee.ProjectMapper do + def discover_project_structure(project_path) do + with {:ok, project_type} <- detect_project_type(project_path), + {:ok, existing_structure} <- scan_directory_structure(project_path), + {:ok, user_preferences} <- conduct_interactive_discovery(project_type, existing_structure), + {:ok, project_map} <- generate_project_map(user_preferences) do + {:ok, project_map} + end + end +end +``` + +### Mix Task Implementation Pattern +```elixir +defmodule Mix.Tasks.Wdd.Validate do + use Mix.Task + + def run(args) do + {opts, _} = OptionParser.parse!(args, switches: @switches) + + with {:ok, project_map} <- load_or_discover_project_map(opts), + {:ok, validation_results} <- validate_project(project_map, opts) do + display_results(validation_results, opts) + end + end +end +``` + +### Template Generation System +```elixir +# EEx template for functional core +defmodule <%= module_name %> do + @moduledoc """ + Functional core for <%= description %>. + + This module contains pure business logic with no side effects. + All functions are composable and return tagged tuples. 
+ """ + + def process_<%= function_name %>(data) do + data + |> validate_input() + |> transform_data() + |> format_result() + end + + defp validate_input(data) do + # Pure validation logic + end +end +``` + +## Technical Details + +### Project Map Structure +```yaml +project_name: "my_app" +project_type: phoenix_web +root_path: "/path/to/project" + +wdd_layers: + data: "lib/my_app/types" + functions: "lib/my_app/core" + tests: "test" + boundaries: "lib/my_app_web" + lifecycles: "lib/my_app/application.ex" + workers: "lib/my_app/workers" + +naming_conventions: + module_prefix: "MyApp" + functional_core_suffix: "Core" +``` + +### Validation Engine Architecture +- **Pattern-based Detection**: Uses regex patterns to identify WDD violations +- **Scoring Algorithm**: Layer-specific scores aggregated into overall project score +- **Rule Categories**: Functional core purity, boundary patterns, data structures, testing +- **Framework Awareness**: Different validation rules for Phoenix, OTP, libraries + +### File Organization +``` +agents/worker-bee/ +├── agent.md # Claude Code agent definition +├── metadata.json # Agent configuration +├── USER_GUIDE.md # Complete usage documentation +├── README.md # Project overview +├── lib/ +│ ├── project_mapper.ex # Interactive discovery +│ ├── wdd_validator.ex # Compliance validation +│ ├── template_generator.ex # Code scaffolding +│ └── mix/tasks/wdd/ +│ ├── validate.ex # Validation CLI +│ ├── scaffold.ex # Generation CLI +│ └── remap.ex # Remapping CLI +├── templates/ +│ ├── functional_core.ex.eex # Pure function templates +│ ├── boundary_genserver.ex.eex # GenServer templates +│ └── [other component templates] +├── config/ +│ └── wdd_patterns.yaml # Validation patterns +└── validation/ + ├── functional_core_rules.ex # Purity validation + ├── boundary_rules.ex # GenServer patterns + ├── data_rules.ex # Structure validation + └── testing_rules.ex # Test organization +``` + +## Challenges & Solutions + +### Challenge 1: "Discovery Once" 
Implementation +**Problem**: Agent needed to remember project structure without being intrusive +**Solution**: Implemented persistent `.wdd_project_map.yaml` with intelligent re-mapping detection + +### Challenge 2: Framework Agnostic Design +**Problem**: Different Elixir project types have vastly different structures +**Solution**: Interactive discovery process that adapts to any project organization + +### Challenge 3: Educational vs. Prescriptive Balance +**Problem**: Agent needed to teach WDD principles while being practical +**Solution**: Contextual explanations in every response, gradual adoption guidance + +### Challenge 4: Mix Task Integration Complexity +**Problem**: Rich CLI functionality while maintaining simplicity +**Solution**: Separate tasks with shared business logic modules, consistent flag patterns + +### Challenge 5: Code Generation Flexibility +**Problem**: Generated code needed to match project conventions +**Solution**: EEx templating system using project map data for customization + +### Challenge 6: Validation Engine Performance +**Problem**: Large codebases could make validation slow +**Solution**: Targeted validation using project map, parallel processing where possible + +### Challenge 7: Intent Agent System Integration +**Problem**: Ensuring agent follows Intent's agent patterns and conventions +**Solution**: Followed established agent structure from existing intent/elixir agents + +## Key Implementation Insights + +### "Discovery Once" Principle Success +The persistent project mapping approach proved essential for user experience. Users appreciate that the agent remembers their project structure and doesn't repeat discovery unless explicitly requested or when significant changes are detected. + +### Framework Detection Intelligence +Automatic project type detection combined with interactive confirmation creates the right balance of automation and user control. 
The agent can intelligently suggest appropriate WDD layer organization while respecting user preferences. + +### Educational Agent Pattern +The system prompt emphasizes explanation and context rather than just prescriptive rules. This creates a teaching agent that helps developers understand WDD principles rather than just enforcing them blindly. + +### Mix Task Composability +Separate tasks for validate, scaffold, and remap operations allow for flexible workflows while sharing common business logic. Users can compose these tasks into their development processes naturally. + +## Files Created + +**Total**: 19 files across agent definition, business logic, templates, and documentation +**Core Agent**: agent.md (212 lines), metadata.json (27 lines) +**Documentation**: USER_GUIDE.md (563 lines), README.md (222 lines) +**Business Logic**: 8 Elixir modules with comprehensive functionality +**Templates**: EEx templates for all WDD component types +**Configuration**: YAML validation patterns and rules + +This implementation provides a comprehensive foundation for WDD architecture enforcement while maintaining flexibility and educational value. \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0018/info.md b/intent/st/COMPLETED/ST0018/info.md new file mode 100644 index 0000000..11226e8 --- /dev/null +++ b/intent/st/COMPLETED/ST0018/info.md @@ -0,0 +1,40 @@ +--- +verblock: "15 Aug 2025:v0.1: Torrell Ewan - Initial version" +intent_version: 2.2.0 +status: Completed +created: 20250815 +completed: 20250815 +--- +# ST0018: Worker-Bee Intent Agent for WDD Architecture Enforcement + +## Objective + +Create a comprehensive Intent agent that enforces Worker-Bee Driven Design (WDD) architecture principles in Elixir applications through interactive project mapping, automated validation, and intelligent code scaffolding. + +## Context + +The Intent project's agent system needed a specialized WDD expert to help Elixir developers maintain architectural consistency. 
Worker-Bee Driven Design is a 6-layer architecture methodology that emphasizes functional programming principles, clear separation of concerns, and maintainable code structure. + +This steel thread addresses the need for: +- Automated WDD compliance validation +- Intelligent project structure discovery and mapping +- Educational guidance on WDD principles +- Framework-agnostic support for any Elixir project type +- Integration with Claude Code's sub-agent system + +## Related Steel Threads + +- ST0017: Intent Agent System (foundational agent infrastructure) +- Related to Elixir development practices and architectural guidance + +## Context for LLM + +This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. + +### How to update this document + +1. Update the status as work progresses +2. Update related documents (design.md, impl.md, etc.) as needed +3. Mark the completion date when finished + +The LLM should assist with implementation details and help maintain this document as work progresses. 
\ No newline at end of file diff --git a/intent/st/COMPLETED/ST0018/tasks.md b/intent/st/COMPLETED/ST0018/tasks.md new file mode 100644 index 0000000..778f04d --- /dev/null +++ b/intent/st/COMPLETED/ST0018/tasks.md @@ -0,0 +1,113 @@ +--- +verblock: "15 Aug 2025:v0.1: Torrell Ewan - Task breakdown for worker-bee agent implementation" +intent_version: 2.2.0 +--- +# Tasks - ST0018: Worker-Bee Intent Agent for WDD Architecture Enforcement + +## Tasks + +### Phase 1: Agent Foundation +- [x] Create agent directory structure +- [x] Design comprehensive system prompt for WDD expertise +- [x] Implement "discovery once" pattern with project mapping +- [x] Create metadata.json with proper tool specifications +- [x] Test agent installation and basic functionality + +### Phase 2: Mix Task Infrastructure +- [x] Design Mix task architecture (validate, scaffold, remap) +- [x] Implement mix wdd.validate with scoring and feedback +- [x] Implement mix wdd.scaffold with EEx template system +- [x] Implement mix wdd.remap with backup functionality +- [x] Create shared business logic modules + +### Phase 3: Validation Engine +- [x] Design WDD compliance rules and patterns +- [x] Implement functional core purity validation +- [x] Implement boundary layer pattern validation +- [x] Implement data structure validation +- [x] Implement testing organization validation +- [x] Create scoring algorithm with layer-specific metrics + +### Phase 4: Code Generation System +- [x] Design EEx template architecture +- [x] Create functional core templates +- [x] Create boundary GenServer templates +- [x] Create data structure templates +- [x] Create testing templates +- [x] Implement project-aware generation using mapping + +### Phase 5: Framework Support +- [x] Implement Phoenix project type detection and patterns +- [x] Implement OTP application patterns +- [x] Implement library project patterns +- [x] Add framework-aware validation rules +- [x] Create context-appropriate scaffolding + +### Phase 6: 
Interactive Discovery +- [x] Design project structure discovery workflow +- [x] Implement project type detection +- [x] Create interactive questionnaire system +- [x] Implement persistent project mapping +- [x] Add intelligent re-mapping suggestions + +### Phase 7: Documentation & User Experience +- [x] Create comprehensive USER_GUIDE.md +- [x] Write project README with examples +- [x] Document all Mix task options and examples +- [x] Create troubleshooting guide +- [x] Add educational guidance for WDD principles + +### Phase 8: Integration & Testing +- [x] Integrate with Intent agent management system +- [x] Test agent installation process +- [x] Validate all Mix tasks function correctly +- [x] Test framework detection across project types +- [x] Verify educational explanations are helpful + +### Phase 9: Intent Project Integration +- [x] Create steel thread documentation (ST0018) +- [x] Update Intent documentation with agent creation guide +- [x] Document agent in Intent's available agents list +- [x] Ensure agent follows Intent project conventions + +## Task Notes + +### Critical Success Factors +- **"Discovery Once" Implementation**: Essential for user experience - agent must remember project structure without being intrusive +- **Framework Agnostic Design**: Must work equally well with Phoenix, OTP, libraries, and other Elixir project types +- **Educational Balance**: Agent should teach WDD principles while being practical and actionable +- **Code Generation Quality**: Generated code must follow project conventions and established patterns + +### Implementation Approach +Tasks were completed in logical sequence with foundations first (agent definition, core business logic) followed by user-facing features (Mix tasks, documentation). The educational aspect was integrated throughout rather than added as an afterthought. 
+ +### Quality Assurance +Each phase included validation that generated code follows WDD principles, ensuring the agent practices what it preaches. All Mix tasks were tested with various project structures to ensure robustness. + +## Dependencies + +### Prerequisites Completed +- ST0017: Intent Agent System infrastructure (provides agent installation framework) +- Intent v2.2.0 agent management capabilities +- Claude Code sub-agent integration + +### External Dependencies +- Elixir/Mix ecosystem for task integration +- YAML library for project mapping persistence +- EEx templating system for code generation +- File system access for project structure discovery + +### Internal Dependencies +- Agent definition must be complete before Mix task implementation +- Project mapping system must work before validation can use it +- Business logic modules must be implemented before CLI interfaces +- Templates must be created before scaffolding functionality + +### Sequential Requirements +1. Agent foundation (system prompt, metadata) enables Claude Code integration +2. Project mapping system enables all other functionality +3. Validation engine requires mapping system and pattern definitions +4. Scaffolding requires both mapping system and template infrastructure +5. Documentation requires all features to be complete for accurate examples + +All dependencies were satisfied during implementation, with the agent now providing comprehensive WDD support for Elixir projects while integrating seamlessly with Intent's project management methodology. 
\ No newline at end of file diff --git a/intent/st/COMPLETED/ST0019/design.md b/intent/st/COMPLETED/ST0019/design.md new file mode 100644 index 0000000..ababa90 --- /dev/null +++ b/intent/st/COMPLETED/ST0019/design.md @@ -0,0 +1,100 @@ +--- +verblock: "05 Sep 2025:v0.1: matts - Design specifications for ash-expert agent" +intent_version: 2.3.2 +--- +# Design - ST0019: Ash-Expert Agent for Modern Ash Framework Code Quality and Architecture + +## Approach + +Implement a specialized Intent agent focused on Ash Framework expertise through: + +1. **4-Tier Quality System**: Structured expertise levels from critical fixes to advanced patterns +2. **"Strict but Helpful Mentor" Philosophy**: Enforce quality gates while providing educational guidance +3. **Modern Pattern Focus**: Promote Ash 3.0+ features over legacy approaches +4. **Documentation Integration**: Leverage existing Intent documentation at intent/docs/ref/ash/ +5. **Claude Code Sub-Agent**: Integrate with Intent's agent system for specialized task delegation + +## Design Decisions + +### 4-Tier Expertise Architecture +**Decision**: Structure agent capabilities into four distinct tiers +**Rationale**: Provides clear escalation path from critical fixes to advanced scenarios, ensuring both beginners and experts get appropriate guidance + +### "Strict but Helpful Mentor" Personality +**Decision**: Design agent to be opinionated about quality while remaining educational +**Rationale**: Prevents developers from shipping anti-patterns while teaching proper Ash principles through concrete examples + +### Anti-Pattern Detection Focus +**Decision**: Prioritize flagging direct Ecto usage and other critical anti-patterns +**Rationale**: Prevents the most common and damaging mistakes that bypass Ash's benefits + +### Modern Ash 3.0+ Pattern Promotion +**Decision**: Exclusively recommend current best practices, not legacy approaches +**Rationale**: Ensures codebases use the most maintainable and performant patterns available 
+ +### Documentation-Driven Responses +**Decision**: Always reference intent/docs/ref/ash/ documentation in responses +**Rationale**: Maintains consistency with project patterns and provides learning resources + +## Architecture + +### Agent Structure +``` +ash-expert/ +├── agent.md # Claude Code agent definition (164 lines) +├── metadata.json # Agent configuration with tool specs +└── [Integration with Intent agent system] +``` + +### 4-Tier Capability System + +**Tier 1: Critical Quality Gates (Must Fix Immediately)** +- Ecto/Ash pattern detection and flagging +- Resource definition validation +- Query anti-pattern identification +- Action implementation review + +**Tier 2: Modern Pattern Promotion (Architectural Guidance)** +- Ash 3.0+ feature suggestions +- Domain-driven design enforcement +- Authorization pattern review +- Performance optimization identification + +**Tier 3: Development Quality (Best Practices)** +- Migration generation guidance +- Test template generation +- Error handling enforcement +- Documentation integration + +**Tier 4: Advanced Scenarios (Expert-Level)** +- Multi-resource transaction review +- Change tracking implementation +- Code interface generation + +### Integration Points +- **Intent Project Integration**: Works within steel thread methodology +- **Agent Ecosystem**: Complements elixir and worker-bee agents +- **Documentation System**: References intent/docs/ref/ash/ patterns +- **Quality Enforcement**: Integrates with project quality gates + +## Alternatives Considered + +### Alternative 1: Extend Elixir Agent with Ash Knowledge +**Rejected**: Would dilute the elixir agent's focus and create knowledge overlap +**Chosen**: Dedicated ash-expert agent with specialized Ash Framework knowledge + +### Alternative 2: Simple Pattern Checker +**Rejected**: Would only catch basic issues without providing educational value +**Chosen**: Comprehensive mentor that teaches while enforcing quality + +### Alternative 3: Framework-Agnostic Data 
Layer Agent +**Rejected**: Ash has unique patterns that generic approaches wouldn't handle well +**Chosen**: Ash-specific agent that understands framework philosophy and patterns + +### Alternative 4: Documentation-Only Approach +**Rejected**: Static documentation doesn't provide context-aware guidance +**Chosen**: Interactive agent that applies documentation patterns to specific code + +### Alternative 5: Flat Expertise Model +**Rejected**: Would be overwhelming for beginners and insufficient for experts +**Chosen**: 4-tier system that scales guidance to developer needs and code complexity \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0019/impl.md b/intent/st/COMPLETED/ST0019/impl.md new file mode 100644 index 0000000..9224e76 --- /dev/null +++ b/intent/st/COMPLETED/ST0019/impl.md @@ -0,0 +1,190 @@ +--- +verblock: "05 Sep 2025:v0.1: matts - Implementation details for ash-expert agent" +intent_version: 2.3.2 +--- +# Implementation - ST0019: Ash-Expert Agent for Modern Ash Framework Code Quality and Architecture + +## Implementation + +The ash-expert agent was implemented as a comprehensive Ash Framework specialist with two main components: + +### 1. Claude Code Agent Integration +- **Agent Definition**: `intent/plugins/claude/subagents/ash-expert/agent.md` with extensive system prompt (164 lines) +- **Metadata Configuration**: `intent/plugins/claude/subagents/ash-expert/metadata.json` with tool specifications +- **Installation**: Integrates with Intent's agent management system via `intent claude subagents install ash-expert` + +### 2. 
4-Tier Expertise System +Four structured capability tiers provide escalating levels of Ash Framework guidance: +- **Tier 1**: Critical quality gates for immediate fixes +- **Tier 2**: Modern pattern promotion for architectural guidance +- **Tier 3**: Development quality best practices +- **Tier 4**: Advanced scenarios for expert-level implementations + +## Code Examples + +### Agent System Prompt Structure +```markdown +--- +name: ash-expert +description: Modern Ash 3.0+ specialist for code quality, best practices, and architectural guidance +tools: Bash, Read, Write, Edit, Grep, Glob, LS +--- + +You are a specialized Ash Framework expert with deep expertise in modern Ash 3.0+ patterns... + +## Your Role - The "Strict but Helpful Mentor" + +When working with developers, you should: +1. **Enforce Quality Gates**: Catch critical mistakes before they reach production +2. **Promote Modern Patterns**: Suggest Ash 3.0+ approaches over legacy patterns +3. **Provide Concrete Examples**: Show actual code transformations, not abstract advice +``` + +### Agent Usage Patterns +```elixir +# Resource Quality Review +Task( + description="Review Payment resource for Ash best practices", + prompt="Analyze lib/my_app/resources/payment.ex for anti-patterns, suggest modern Ash 3.0+ improvements", + subagent_type="ash-expert" +) + +# Query Optimization +Task( + description="Optimize Ash query performance", + prompt="Review PaymentService.list_payments/1 - fix post-filtering issues and show proper Ash.Query patterns", + subagent_type="ash-expert" +) +``` + +### Critical Anti-Pattern Detection +The agent flags these issues immediately: +```elixir +# ❌ Direct Ecto bypass +Repo.all(Payment) +Ecto.Changeset.change(payment, %{status: :paid}) + +# ✅ Proper Ash patterns +Ash.read!(Payment) +Ash.Changeset.for_update(payment, :mark_paid) +``` + +### Modern Ash 3.0+ Pattern Promotion +```elixir +# ❌ Legacy loop patterns +for user <- users, do: update_user_status(user, :active) + +# ✅ Modern bulk 
operations +Ash.bulk_update!(User, :activate, %{status: :active}) +``` + +## Technical Details + +### Agent File Structure +``` +intent/plugins/claude/subagents/ash-expert/ +├── agent.md # Claude Code agent definition (164 lines) +└── metadata.json # Agent configuration (14 lines) +``` + +### Metadata Configuration +```json +{ + "name": "ash-expert", + "version": "1.0.0", + "description": "Modern Ash 3.0+ specialist providing comprehensive code quality enforcement...", + "author": "Intent Development Team", + "tools": ["Bash", "Read", "Write", "Edit", "Grep", "Glob", "LS"], + "tags": ["ash", "ash-framework", "elixir", "code-quality", "performance", "domain-driven-design"], + "context_sources": [ + "intent/docs/ref/ash/", + "intent/docs/ref/ash/ash_usage_rules.md", + "intent/docs/ref/ash/deps/ash_postgres/usage-rules.md", + "intent/docs/ref/ash/deps/ash_phoenix/usage-rules.md" + ] +} +``` + +### 4-Tier Capability Mapping +Each tier targets specific developer needs and code complexity levels: + +**Tier 1: Critical Quality Gates** +- Pattern detection using systematic code analysis +- Immediate flagging of Ecto/Ash anti-patterns +- Resource definition validation preventing cast errors +- Action implementation review for proper Ash usage + +**Tier 2: Modern Pattern Promotion** +- Ash 3.0+ feature suggestions with concrete examples +- Domain-driven design boundary validation +- Authorization policy review and security gap identification +- Performance optimization through modern Ash patterns + +**Tier 3: Development Quality** +- ash_postgres migration guidance with proper constraints +- Test template generation for Ash-specific testing patterns +- Error handling enforcement using Ash error system +- Documentation integration with intent/docs/ref/ash/ + +**Tier 4: Advanced Scenarios** +- Multi-resource transaction pattern validation +- Change tracking and audit trail implementation guidance +- Code interface generation for domain-driven APIs + +### Integration 
Architecture +The agent integrates with Intent's ecosystem through: +- **Steel Thread Awareness**: References architectural decisions in steel threads +- **Documentation Integration**: Always references intent/docs/ref/ash/ patterns +- **Agent Ecosystem**: Complements elixir (general Elixir patterns) and worker-bee (WDD architecture) agents +- **Quality Gates**: Integrates with project quality enforcement workflows + +## Challenges & Solutions + +### Challenge 1: Balancing Strictness with Helpfulness +**Problem**: Agent needed to be opinionated about quality without being discouraging +**Solution**: Implemented "strict but helpful mentor" personality that explains the "why" behind quality requirements and provides concrete examples for improvements + +### Challenge 2: Ash Framework Complexity +**Problem**: Ash has many nuanced patterns that generic advice wouldn't handle well +**Solution**: Created 4-tier system that scales from basic anti-pattern detection to advanced transaction patterns, allowing appropriate guidance for different skill levels + +### Challenge 3: Integration with Existing Agent Ecosystem +**Problem**: Needed to complement existing elixir and worker-bee agents without overlap +**Solution**: Focused specifically on Ash Framework patterns while referencing other agents for general Elixir (elixir agent) and architecture (worker-bee agent) guidance + +### Challenge 4: Documentation Context Awareness +**Problem**: Agent needed to stay current with project-specific Ash patterns and usage rules +**Solution**: Built-in context_sources that reference intent/docs/ref/ash/ documentation, ensuring consistency with established project patterns + +### Challenge 5: Modern Pattern Promotion +**Problem**: Needed to promote Ash 3.0+ patterns over legacy approaches without breaking existing code +**Solution**: Agent suggests modern patterns with migration strategies and explains benefits, allowing developers to upgrade incrementally + +### Challenge 6: Quality Gate 
Implementation +**Problem**: Needed to catch critical mistakes without overwhelming developers +**Solution**: Prioritized Tier 1 (critical) issues that prevent production problems while organizing other guidance into structured tiers + +## Key Implementation Insights + +### "Strict but Helpful Mentor" Success +The agent personality strikes the right balance between quality enforcement and education. Developers receive firm guidance on anti-patterns while understanding the reasoning and getting concrete improvement examples. + +### 4-Tier Expertise Scaling +The tiered approach allows the agent to provide appropriate guidance regardless of developer skill level or code complexity. Beginners get critical fixes while experts get advanced pattern guidance. + +### Documentation Integration Value +Always referencing intent/docs/ref/ash/ ensures consistency with project standards and provides developers with learning resources beyond the immediate interaction. + +### Anti-Pattern Focus Impact +Prioritizing detection of direct Ecto usage and other critical anti-patterns prevents the most damaging mistakes that completely bypass Ash's benefits. + +### Modern Pattern Promotion Effectiveness +Exclusively promoting Ash 3.0+ patterns ensures codebases use current best practices, improving maintainability and performance while preventing technical debt accumulation. + +## Files Created + +**Total**: 2 files implementing comprehensive Ash Framework expertise +**Core Agent**: agent.md (164 lines of detailed system prompt and usage examples) +**Configuration**: metadata.json (14 lines with comprehensive tags and context sources) + +The implementation provides a specialized foundation for Ash Framework quality enforcement while maintaining educational value and integration with Intent's project methodology. 
\ No newline at end of file diff --git a/intent/st/COMPLETED/ST0019/info.md b/intent/st/COMPLETED/ST0019/info.md new file mode 100644 index 0000000..5e3d2df --- /dev/null +++ b/intent/st/COMPLETED/ST0019/info.md @@ -0,0 +1,44 @@ +--- +verblock: "05 Sep 2025:v0.1: matts - Initial version" +intent_version: 2.3.2 +status: Completed +created: 20250905 +completed: 20250905 +--- +# ST0019: Ash-Expert Agent for Modern Ash Framework Code Quality and Architecture + +## Objective + +Create a specialized Intent agent that provides comprehensive Ash Framework expertise, focusing on modern Ash 3.0+ patterns, code quality enforcement, and architectural guidance to prevent common anti-patterns and promote best practices. + +## Context + +The Intent project's agent system needed a dedicated Ash Framework specialist to complement the existing elixir and worker-bee agents. While the elixir agent provides general Elixir Usage Rules and the worker-bee agent enforces WDD architecture, there was a gap in specialized Ash Framework knowledge. + +This steel thread addresses the need for: +- Critical quality gates to catch Ecto/Ash anti-patterns before production +- Modern Ash 3.0+ pattern promotion over legacy approaches +- Performance optimization through proper query patterns and bulk operations +- Comprehensive migration and resource definition guidance +- Integration with existing Intent documentation at intent/docs/ref/ash/ +- A "strict but helpful mentor" approach to Ash development + +The ash-expert agent acts as the final quality layer for Ash Framework implementations, ensuring code follows modern patterns, performs well, and maintains proper domain-driven design principles. 
+ +## Related Steel Threads + +- ST0017: Intent Agent System (foundational agent infrastructure) +- ST0018: Worker-Bee Intent Agent (complementary WDD architecture enforcement) +- Related to Elixir agent for general language patterns + +## Context for LLM + +This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. + +### How to update this document + +1. Update the status as work progresses +2. Update related documents (design.md, impl.md, etc.) as needed +3. Mark the completion date when finished + +The LLM should assist with implementation details and help maintain this document as work progresses. \ No newline at end of file diff --git a/intent/st/COMPLETED/ST0019/tasks.md b/intent/st/COMPLETED/ST0019/tasks.md new file mode 100644 index 0000000..1f811b7 --- /dev/null +++ b/intent/st/COMPLETED/ST0019/tasks.md @@ -0,0 +1,121 @@ +--- +verblock: "05 Sep 2025:v0.1: matts - Task breakdown for ash-expert agent implementation" +intent_version: 2.3.2 +--- +# Tasks - ST0019: Ash-Expert Agent for Modern Ash Framework Code Quality and Architecture + +## Tasks + +### Phase 1: Agent Foundation +- [x] Create agent directory structure in intent/plugins/claude/subagents/ash-expert/ +- [x] Design comprehensive system prompt focused on Ash Framework expertise +- [x] Implement "strict but helpful mentor" personality in agent definition +- [x] Create metadata.json with proper tool specifications and context sources +- [x] Test agent installation and basic functionality + +### Phase 2: 4-Tier Expertise System +- [x] Design 4-tier capability architecture (Critical, Modern, Quality, Advanced) +- [x] Implement Tier 1: Critical quality gates for anti-pattern detection +- [x] Implement Tier 2: Modern Ash 3.0+ pattern promotion +- [x] Implement Tier 3: Development quality best practices 
+- [x] Implement Tier 4: Advanced scenarios for expert-level guidance +- [x] Create structured escalation from basic to advanced capabilities + +### Phase 3: Anti-Pattern Detection System +- [x] Design critical anti-pattern identification patterns +- [x] Implement Ecto/Ash pattern detection and flagging +- [x] Implement resource definition validation +- [x] Implement query anti-pattern identification (N+1, manual loops) +- [x] Implement action implementation review patterns +- [x] Create immediate flagging system for production-critical issues + +### Phase 4: Modern Pattern Promotion +- [x] Design Ash 3.0+ feature promotion system +- [x] Create bulk operation pattern suggestions +- [x] Implement atomic update pattern promotion +- [x] Design domain-driven design boundary enforcement +- [x] Create authorization policy review patterns +- [x] Implement performance optimization identification + +### Phase 5: Documentation Integration +- [x] Design documentation reference system for intent/docs/ref/ash/ +- [x] Implement context_sources integration with usage rules +- [x] Create ash_postgres usage rules integration +- [x] Create ash_phoenix usage rules integration +- [x] Ensure consistency with existing project patterns +- [x] Add learning resource references in agent responses + +### Phase 6: Agent Ecosystem Integration +- [x] Design integration with existing elixir agent +- [x] Design integration with existing worker-bee agent +- [x] Ensure complementary capabilities without overlap +- [x] Test agent interaction patterns with Intent system +- [x] Validate agent follows Intent project conventions + +### Phase 7: Quality Assurance & Testing +- [x] Test agent with various Ash resource patterns +- [x] Validate anti-pattern detection accuracy +- [x] Test modern pattern suggestions for effectiveness +- [x] Verify documentation integration works correctly +- [x] Ensure "strict but helpful" personality balance + +### Phase 8: Integration & Deployment +- [x] Integrate with 
Intent's agent management system +- [x] Update global-agents.json manifest +- [x] Update AGENTS.md with ash-expert agent description +- [x] Test agent installation and availability +- [x] Validate agent works within Claude Code sub-agent system + +### Phase 9: Intent Project Documentation +- [x] Create steel thread documentation (ST0019) +- [x] Update Intent documentation with ash-expert agent +- [x] Document agent in Intent's available agents list +- [x] Ensure agent follows Intent project conventions +- [x] Complete steel thread files (info.md, design.md, impl.md, tasks.md) + +## Task Notes + +### Critical Success Factors +- **4-Tier Expertise Balance**: Essential for scaling guidance from beginner to expert levels without overwhelming users +- **Anti-Pattern Focus**: Must catch critical Ecto bypasses and resource definition errors that cause production issues +- **Modern Pattern Promotion**: Agent should exclusively promote Ash 3.0+ patterns to prevent technical debt +- **Documentation Integration**: All responses must reference intent/docs/ref/ash/ for consistency and learning + +### Implementation Approach +Tasks were completed in logical sequence with agent foundation first, followed by the core expertise system, then integration and testing. The "strict but helpful mentor" personality was integrated throughout rather than added as an afterthought. + +### Quality Assurance +Each phase included validation that the agent provides appropriate guidance for its tier level. Anti-pattern detection was tested with common Ash/Ecto mistakes, and modern pattern promotion was validated against Ash 3.0+ best practices. 
+ +## Dependencies + +### Prerequisites Completed +- ST0017: Intent Agent System infrastructure (provides agent installation framework) +- Intent v2.3.2 plugin architecture and Claude subagent integration +- Claude Code sub-agent system compatibility +- Existing elixir and worker-bee agents for ecosystem integration + +### External Dependencies +- Claude Code sub-agent system for agent execution +- Intent's plugin architecture for agent registration +- Ash Framework knowledge base and documentation +- intent/docs/ref/ash/ documentation structure + +### Internal Dependencies +- Agent definition must be complete before testing can begin +- 4-tier system must be designed before individual tier implementation +- Anti-pattern detection requires comprehensive Ash/Ecto pattern knowledge +- Documentation integration requires existing usage rules and patterns + +### Sequential Requirements +1. Agent foundation (system prompt, metadata) enables basic functionality +2. 4-tier system design enables structured capability implementation +3. Anti-pattern detection requires deep Ash Framework pattern knowledge +4. Modern pattern promotion requires understanding of Ash 3.0+ features +5. Documentation integration requires all features to reference consistently +6. Ecosystem integration requires understanding of elixir and worker-bee agents + +All dependencies were satisfied during implementation, with the agent now providing comprehensive Ash Framework expertise while maintaining consistency with Intent's project methodology and agent ecosystem. + +### Completion Status +All tasks completed successfully. The ash-expert agent is fully implemented and integrated with Intent's agent system, providing comprehensive Ash Framework expertise through the 4-tier system while maintaining the "strict but helpful mentor" approach. 
\ No newline at end of file diff --git a/stp/prj/st/ST0010/design.md b/intent/st/NOT-STARTED/ST0010/design.md similarity index 100% rename from stp/prj/st/ST0010/design.md rename to intent/st/NOT-STARTED/ST0010/design.md diff --git a/stp/prj/st/ST0010/impl.md b/intent/st/NOT-STARTED/ST0010/impl.md similarity index 100% rename from stp/prj/st/ST0010/impl.md rename to intent/st/NOT-STARTED/ST0010/impl.md diff --git a/stp/prj/st/ST0010/info.md b/intent/st/NOT-STARTED/ST0010/info.md similarity index 96% rename from stp/prj/st/ST0010/info.md rename to intent/st/NOT-STARTED/ST0010/info.md index cdf4ceb..a527a70 100644 --- a/stp/prj/st/ST0010/info.md +++ b/intent/st/NOT-STARTED/ST0010/info.md @@ -1,12 +1,12 @@ --- verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 -status: On Hold +intent_version: 2.0.0 +status: Not Started created: 20250603 --- # ST0010: Anthropic MCP Integration -- **Status**: On Hold +- **Status**: Not Started - **Created**: 2025-06-03 - **Completed**: - **Author**: Matthew Sinclair diff --git a/stp/prj/st/ST0010/tasks.md b/intent/st/NOT-STARTED/ST0010/tasks.md similarity index 100% rename from stp/prj/st/ST0010/tasks.md rename to intent/st/NOT-STARTED/ST0010/tasks.md diff --git a/stp/prj/st/NOT-STARTED/ST0015/design.md b/intent/st/NOT-STARTED/ST0015/design.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0015/design.md rename to intent/st/NOT-STARTED/ST0015/design.md diff --git a/stp/prj/st/NOT-STARTED/ST0015/impl.md b/intent/st/NOT-STARTED/ST0015/impl.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0015/impl.md rename to intent/st/NOT-STARTED/ST0015/impl.md diff --git a/stp/prj/st/NOT-STARTED/ST0015/info.md b/intent/st/NOT-STARTED/ST0015/info.md similarity index 93% rename from stp/prj/st/NOT-STARTED/ST0015/info.md rename to intent/st/NOT-STARTED/ST0015/info.md index 3367556..8144bf3 100644 --- a/stp/prj/st/NOT-STARTED/ST0015/info.md +++ b/intent/st/NOT-STARTED/ST0015/info.md @@ -1,6 +1,6 
@@ --- verblock: "09 Jul 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.1 +intent_version: 2.0.0 status: Not Started created: 20250709 completed: @@ -40,5 +40,3 @@ This document represents a single steel thread - a self-contained unit of work f 1. Update the status as work progresses 2. Update related documents (design.md, impl.md, etc.) as needed 3. Mark the completion date when finished - -The LLM should assist with implementation details and help maintain this document as work progresses. \ No newline at end of file diff --git a/stp/prj/st/NOT-STARTED/ST0015/tasks.md b/intent/st/NOT-STARTED/ST0015/tasks.md similarity index 100% rename from stp/prj/st/NOT-STARTED/ST0015/tasks.md rename to intent/st/NOT-STARTED/ST0015/tasks.md diff --git a/intent/st/ST0018/design.md b/intent/st/ST0018/design.md new file mode 100644 index 0000000..d303951 --- /dev/null +++ b/intent/st/ST0018/design.md @@ -0,0 +1,107 @@ +--- +verblock: "20 Aug 2025:v0.1: matts - Initial design" +intent_version: 2.3.0 +--- +# ST0018: Design - AGENTS.md Support + +## Architecture Overview + +The implementation introduces a plugin architecture for Intent, with both AGENTS.md support and Claude subagents implemented as plugins. 
+ +### Plugin Structure +``` +intent/plugins/ +├── agents/ # AGENTS.md plugin +│ ├── bin/ # Plugin commands +│ ├── templates/ # AGENTS.md templates +│ └── config.json # Plugin configuration +└── claude/ # Claude plugin + ├── subagents/ # Subagent definitions + └── bin/ # Plugin commands +``` + +## Command Structure Changes + +### Before (v2.2.0) +```bash +intent agents init +intent agents install +intent agents list +``` + +### After (v2.3.0) +```bash +# AGENTS.md commands (new) +intent agents init # Create AGENTS.md +intent agents sync # Update AGENTS.md +intent agents validate # Check compliance + +# Claude subagents (renamed) +intent claude subagents init +intent claude subagents install +intent claude subagents list +``` + +## Plugin Dispatch + +The main `bin/intent` script now handles plugin dispatch: + +1. Detects plugin commands (agents, claude) +2. Routes to appropriate plugin bin script +3. Maintains backward compatibility for core commands + +## AGENTS.md Implementation + +### File Location +- Real file: `intent/llm/AGENTS.md` +- Symlink: `./AGENTS.md` → `intent/llm/AGENTS.md` + +### Generation Logic +The AGENTS.md generator: +1. Detects project type (Node.js, Python, etc.) +2. Extracts build/test commands +3. Includes Intent-specific information +4. Lists installed Claude subagents +5. References steel threads and backlog + +### Template System +- Default template provided +- Framework-specific templates planned +- Customizable sections + +## Migration Strategy + +### Clean Break Approach +- No deprecation period +- Clear error messages guide users to new commands +- intent_upgrade handles v2.2.0 → v2.3.0 migration + +### Migration Tasks +1. Move agents/ → intent/plugins/claude/subagents/ +2. Update command references +3. Generate initial AGENTS.md +4. Update configuration + +## Benefits + +1. **Standards Compliance**: Supports universal AGENTS.md spec +2. **Plugin Architecture**: Extensible for future features +3. **Clean Separation**: Core vs. 
plugins clearly defined +4. **Better Organization**: All extensions in intent/plugins/ +5. **Future-Proof**: Easy to add new plugins + +## Testing Strategy + +1. Unit tests for each plugin command +2. Integration tests for plugin dispatch +3. Migration tests for upgrade path +4. Validation tests for AGENTS.md generation + +## Success Criteria + +- ✅ Plugin architecture implemented +- ✅ AGENTS.md generation working +- ✅ Claude subagents relocated and functional +- ⏳ All tests passing +- ⏳ Documentation updated +- ⏳ Migration path tested \ No newline at end of file diff --git a/intent/st/ST0018/impl.md b/intent/st/ST0018/impl.md new file mode 100644 index 0000000..3b66da4 --- /dev/null +++ b/intent/st/ST0018/impl.md @@ -0,0 +1,17 @@ +# Implementation - ST0018: Upgrade Intent to support AGENTS.md + +## Implementation + +[Notes on implementation details, decisions, challenges, and their resolutions] + +## Code Examples + +[Key code snippets and examples] + +## Technical Details + +[Specific technical details and considerations] + +## Challenges & Solutions + +[Challenges encountered during implementation and how they were resolved] \ No newline at end of file diff --git a/intent/st/ST0018/info.md b/intent/st/ST0018/info.md new file mode 100644 index 0000000..ac4a5c1 --- /dev/null +++ b/intent/st/ST0018/info.md @@ -0,0 +1,39 @@ +--- +verblock: "20 Aug 2025:v0.1: matts - Initial version" +intent_version: 2.2.0 +status: WIP +created: 20250820 +completed: +--- +# ST0018: Upgrade Intent to support AGENTS.md + +## Objective + +Upgrade Intent to support the AGENTS.md specification - a standardized format for providing instructions to AI coding agents. This involves restructuring the existing agent system into a plugin architecture and adding AGENTS.md generation and management capabilities. + +## Context + +AGENTS.md is an emerging standard for providing instructions to AI coding agents (see https://agents.md/). 
Intent currently has a Claude Code subagent system but lacks support for the universal AGENTS.md standard. + +This upgrade involves: +1. Restructuring existing "intent agents" commands to "intent claude subagents" +2. Creating a plugin architecture under intent/plugins/ +3. Implementing AGENTS.md generation and management as a plugin +4. Maintaining backward compatibility through migration tools + +## Related Steel Threads + +- ST0017: Add an Intent sub-agent for Claude Code +- ST0016: Rename STP CLI to INTENT (v2.0.0) + +## Context for LLM + +This document represents a single steel thread - a self-contained unit of work focused on implementing a specific piece of functionality. When working with an LLM on this steel thread, start by sharing this document to provide context about what needs to be done. + +### How to update this document + +1. Update the status as work progresses +2. Update related documents (design.md, impl.md, etc.) as needed +3. Mark the completion date when finished + +The LLM should assist with implementation details and help maintain this document as work progresses. \ No newline at end of file diff --git a/intent/st/ST0018/tasks.md b/intent/st/ST0018/tasks.md new file mode 100644 index 0000000..940de10 --- /dev/null +++ b/intent/st/ST0018/tasks.md @@ -0,0 +1,16 @@ +# Tasks - ST0018: Upgrade Intent to support AGENTS.md + +## Tasks + +- [ ] Task 1 +- [ ] Task 2 +- [ ] Task 3 +- ... + +## Task Notes + +[Additional notes about specific tasks if needed] + +## Dependencies + +[Task dependencies and sequencing requirements] \ No newline at end of file diff --git a/intent/st/steel_threads.md b/intent/st/steel_threads.md new file mode 100644 index 0000000..dc9608f --- /dev/null +++ b/intent/st/steel_threads.md @@ -0,0 +1,62 @@ +--- +verblock: "20 Mar 2025:v0.1: Claude - Updated with new directory structure" +stp_version: 1.2.0 +--- +# Steel Threads + +This document serves as an index of all steel threads in the Steel Thread Process (STP) system. 
A steel thread represents a self-contained unit of work that focuses on implementing a specific piece of functionality. + +## Index + +<!-- BEGIN: STEEL_THREAD_INDEX --> +ID | Title | Status | Created | Completed +-----------|---------------------------|--------------|------------|----------- +ST0016 | Rename STP CLI to INTE... | Completed | 2025-03-11 | 2025-07-27 +ST0015 | Enhanced Steel Thread ... | Not Started | 2025-07-09 | +ST0014 | Directory Structure fo... | Completed | 2025-03-20 | 2025-07-09 +ST0013 | STP Blog Post Series | In Progress | 2025-03-11 | 2025-07-08 +ST0012 | Document Sync Command | Completed | 2025-03-07 | 2025-03-07 +ST0011 | Test Suite Implementation | Completed | 2025-06-03 | 2025-06-03 +ST0010 | Anthropic MCP Integration | Not Started | 2025-06-03 | +ST0009 | Process Refinement | Completed | 2025-03-06 | 2025-06-03 +ST0008 | LLM Integration | Completed | 2025-03-06 | 2025-06-03 +ST0007 | User Documentation | Completed | 2025-03-06 | 2025-06-03 +ST0006 | Help System | Completed | 2025-03-06 | 2025-06-03 +ST0005 | Initialization Command | Completed | 2025-03-06 | 2025-06-03 +ST0004 | Steel Thread Commands | Completed | 2025-03-06 | 2025-06-03 +ST0003 | Template System | Completed | 2025-03-06 | 2025-06-03 +ST0002 | Core Script Framework | Completed | 2025-03-06 | 2025-06-03 +ST0001 | Directory Structure | Completed | 2025-03-06 | 2025-06-03 +<!-- END: STEEL_THREAD_INDEX --> + +## Steel Thread Status Definitions + +<!-- BEGIN: STATUS_DEFINITIONS --> +- **Not Started**: Steel thread has been created but work has not begun (stp/prj/st/NOT-STARTED/) +- **In Progress**: Work is actively being done on this steel thread (stp/prj/st/) +- **Completed**: All tasks have been completed and the steel thread is finished (stp/prj/st/COMPLETED) +- **On Hold**: Work has been temporarily paused (stp/prj/st) +- **Cancelled**: The steel thread has been cancelled and will not be completed (stp/prj/st/CANCELLED) +<!-- END: STATUS_DEFINITIONS --> + +## Context 
for LLM + +This document provides an overview of all steel threads in the STP project. It helps track the progress of individual pieces of work and serves as a navigation aid for finding specific steel thread documents. + +### How to use this document + +<!-- BEGIN: USAGE_INSTRUCTIONS --> +1. Update the index when creating new steel threads +2. Update the status of steel threads as they progress +3. Add completion dates when steel threads are finished +4. Use this document to quickly locate specific steel thread documents +<!-- END: USAGE_INSTRUCTIONS --> + +The detailed information for each steel thread is contained in its individual document (e.g., ST0001.md). +| ST0014 | Directory Structure for Steel Threads | Completed | | 2025-07-09 | +| ST0015 | Enhanced Steel Thread Templates and File Types | Not Started | 2025-07-09 | | +| ST0017 | Add an Intent sub-agent for Claude Code to Intent | Completed | | 2025-07-27 | +| ST0016 | Rename STP CLI to INTENT (v2.0.0) | Completed | | 2025-07-27 | +| ST0013 | STP Blog Post Series | Completed | | 2025-07-27 | +| ST0018 | Upgrade Intent to support AGENTS.md | WIP | 2025-08-20 | | +| ST0019 | Ash-Expert Agent for Modern Ash Framework Code Quality and Architecture | Completed | | 2025-09-05 | diff --git a/intent/usr/deployment_guide.md b/intent/usr/deployment_guide.md new file mode 100644 index 0000000..831eea4 --- /dev/null +++ b/intent/usr/deployment_guide.md @@ -0,0 +1,525 @@ +--- +verblock: "27 Jul 2025:v0.2: Matthew Sinclair - Updated to Intent v2.1.0" +intent_version: 2.1.0 +--- +# Deployment Guide + +This deployment guide provides instructions for deploying the Intent v2.1.0 system in various environments. It covers installation, configuration, and integration with other tools and workflows. + +## Table of Contents + +1. [Installation](#installation) +2. [Configuration](#configuration) +3. [Integration](#integration) +4. [Maintenance](#maintenance) +5. [Upgrading](#upgrading) +6. [Troubleshooting](#troubleshooting) +7. 
[New v2.1.0 Features](#new-v210-features) + +## Installation + +### System Requirements + +- POSIX-compatible shell environment (bash, zsh) +- Git (optional, for version control) +- Text editor with markdown support +- Backlog.md (for task management integration) + +### Installation Methods + +#### Global Installation + +Install Intent globally to make it available for all projects: + +```bash +# Clone the Intent repository +git clone https://github.com/matthewsinclair/intent.git ~/intent + +# Add Intent bin directory to PATH in shell profile +echo 'export INTENT_HOME=~/intent' >> ~/.bashrc +echo 'export PATH=$PATH:$INTENT_HOME/bin' >> ~/.bashrc + +# Reload shell configuration +source ~/.bashrc +``` + +#### Project-Specific Installation + +Install Intent within a specific project: + +```bash +# From your project directory +git clone https://github.com/matthewsinclair/intent.git .intent + +# Create a local alias for the project +alias intent='./.intent/bin/intent' +``` + +#### Installation Verification + +Verify the installation: + +```bash +intent help +``` + +This should display the help information for Intent commands. 
+ +#### Installing Backlog.md + +Install Backlog.md for task management: + +```bash +# Install Backlog globally +npm install -g backlog.md + +# Or install locally in your project +npm install backlog.md + +# Verify installation +backlog --version +``` + +Initialize Backlog in your project: + +```bash +# Initialize Backlog with Intent-friendly settings +intent bl init +``` + +## Configuration + +### Environment Variables + +Configure Intent behavior using these environment variables: + +| Variable | Purpose | Default | +|---------------|--------------------------------|-----------------------------------| +| INTENT_HOME | Location of Intent installation| Path to cloned repository | +| INTENT_PROJECT| Current project name | Determined from initialization | +| INTENT_AUTHOR | Default author name | Determined from git configuration | +| INTENT_EDITOR | Preferred text editor | Determined from system defaults | + +Example configuration in `.bashrc` or `.zshrc`: + +```bash +export INTENT_HOME=~/intent +export INTENT_AUTHOR="Jane Doe" +export INTENT_EDITOR="vim" +``` + +### Project Configuration + +Create a project-specific configuration using `.intent/config.json`: + +```json +{ + "project_name": "Project Name", + "author": "Default Author", + "st_prefix": "ST" +} +``` + +## Integration + +### Version Control Integration + +Intent works seamlessly with git and other version control systems: + +#### Recommended .gitignore + +``` +# Intent temporary files +.intent-tmp/ + +# Intent configuration (contains local paths) +.intent/config.json + +# Backlog configuration +backlog/config.yml +backlog/.git/ +``` + +#### Commit Practices + +- Commit steel thread documents along with code changes +- Use steel thread IDs in commit messages for traceability + +#### Branch Strategy + +- Create feature branches based on steel threads +- Name branches using steel thread IDs (e.g., `feature/ST0001`) + +### CI/CD Integration + +To integrate Intent with CI/CD pipelines: + +1. 
Include the Intent test suite in your CI pipeline: + +```yaml +# Example GitHub Actions workflow +name: Intent Tests + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up test environment + run: ./intent/tests/setup_test_env.sh + - name: Run tests + run: cd intent/tests && ./run_tests.sh +``` + +2. Configure notifications for test failures +3. Add documentation generation steps if needed + +### IDE Integration + +#### VS Code Integration + +1. Install the "Bash Debug" extension for debugging Intent scripts +2. Configure `.vscode/tasks.json` for common Intent tasks: + +```json +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Run Intent Tests", + "type": "shell", + "command": "cd ${workspaceFolder}/intent/tests && ./run_tests.sh", + "group": { + "kind": "test", + "isDefault": true + } + } + ] +} +``` + +#### JetBrains IDE Integration + +1. Configure run configurations for Intent commands +2. Set up file watchers for markdown linting +3. Add shell script run configurations for tests + +### LLM Platform Integration + +#### Claude Code Integration + +To integrate Intent with Claude Code: + +1. Share the `intent/llm/llm_preamble.md` at the beginning of each session +2. Keep relevant steel thread documents in the context window +3. Use structured templates for consistent information sharing + +Example Claude Code command: + +```bash +claude code --context intent/llm/llm_preamble.md --context intent/st/ST0001/info.md +``` + +#### Other LLM Integration + +For other LLM platforms: + +1. Create platform-specific scripts to extract and format Intent context +2. Maintain a consistent formatting pattern when sharing information +3. 
Consider implementing automatic context extraction helpers + +## Maintenance + +### Regular Maintenance Tasks + +- Update Intent installation periodically +- Review and clean up completed steel threads +- Archive older project documents +- Sync steel thread status with Backlog tasks +- Archive completed tasks in Backlog +- Run `intent doctor` to check configuration health + +### Backup Practices + +- Include Intent documents in regular backups +- Ensure documentation is committed to version control +- Back up Backlog task data (backlog/tasks/, backlog/archive/) +- Export task data periodically: + + ```bash + # Export all tasks to JSON + backlog task list --export > backlog-export-$(date +%Y%m%d).json + ``` + +## Upgrading + +### Upgrading Intent Installation + +To upgrade a global Intent installation: + +```bash +cd $INTENT_HOME +git pull +``` + +To upgrade a project-specific installation: + +```bash +cd my-project/.intent +git pull +``` + +### Migrating Between Versions + +When upgrading Intent with Backlog integration: + +1. **Backup existing data**: + + ```bash + # Backup steel threads + cp -r intent/st intent/st.backup + + # Backup Backlog data + cp -r backlog backlog.backup + ``` + +2. **Run upgrade command**: + + ```bash + intent upgrade + ``` + +3. **Migrate embedded tasks** (if upgrading from pre-Backlog version): + + ```bash + # Migrate all active steel threads + intent migrate --all-active + ``` + +4. 
**Verify integration**: + + ```bash + # Check task status + intent status report + + # Verify tasks in Backlog + intent bl list + ``` + +## Test Suite Deployment + +The Intent test suite uses Bats (Bash Automated Testing System) and requires proper setup: + +### Test Dependencies + +The test suite requires the following dependencies: + +- Bats: Core testing framework +- bats-support: Support library for better test output +- bats-assert: Assertion library for test validation +- bats-file: File-related assertions + +### Setting Up the Test Environment + +Run the setup script to install all dependencies: + +```bash +cd intent/tests/ +./setup_test_env.sh +``` + +This script will: + +1. Check for existing Bats installation +2. Install Bats if needed +3. Install required Bats libraries +4. Configure the test environment + +### Test Suite Configuration + +The test suite can be configured through environment variables: + +| Variable | Purpose | Default | +|-------------------|-------------------------------------|-------------------------------| +| BATS_LIB_PATH | Location of Bats libraries | intent/tests/lib | +| INTENT_TEST_TEMP | Temporary directory for test files | /tmp/intent-test-XXXXXX | +| INTENT_BIN_PATH | Path to Intent executables | Determined from current path | + +### Running Tests in Different Environments + +```bash +# Set custom paths for testing +export INTENT_BIN_PATH=/custom/path/to/intent/bin +export BATS_LIB_PATH=/custom/path/to/bats/libs + +# Run tests with custom configuration +cd intent/tests/ +./run_tests.sh +``` + +## Troubleshooting + +### Common Issues + +#### Backlog Git Fetch Errors + +If you encounter git fetch errors with Backlog: + +```bash +# Use the Intent wrapper instead of direct backlog commands +intent bl list # Instead of: backlog task list + +# Verify remote operations are disabled +backlog config get remoteOperations +# Should return: false + +# If not disabled, fix it: +backlog config set remoteOperations false +``` + +#### 
Missing Test Dependencies + +If test dependencies are missing: + +```bash +# Re-run the setup script +cd intent/tests/ +./setup_test_env.sh +``` + +#### Test Failures + +For test failures: + +1. Check the test output for specific errors +2. Verify the Intent installation is correct +3. Ensure all paths are correctly configured +4. Check for permission issues on script files + +#### Permission Errors + +If you encounter permission errors: + +```bash +# Make scripts executable +chmod +x intent/bin/* +chmod +x intent/tests/*.sh +chmod +x intent/tests/lib/*/src/*.bash +``` + +#### Task Synchronization Issues + +If tasks aren't syncing properly with steel threads: + +```bash +# Check task naming convention (should be "ST#### - Description") +intent bl list | grep "ST[0-9]" + +# Manually sync a specific steel thread +intent status sync ST0001 + +# Force sync all active threads +for st in $(intent st list --status "In Progress" | awk '{print $1}' | grep "^ST"); do + intent status sync "$st" +done +``` + +### Diagnostic Tools + +Intent provides several diagnostic tools: + +- `intent help`: Verify command availability +- `intent doctor`: Check configuration and environment health +- `run_tests.sh`: Run tests to verify functionality +- Test failure output: Contains detailed error information + +To debug test failures, examine the test output and check the corresponding script functionality. + +### Getting Help + +If you encounter issues: + +1. Check the troubleshooting section in this guide +2. Review the test output for specific errors +3. Consult the Intent documentation +4. Submit issues to the Intent project repository +5. 
Refer to the Bats documentation for test-specific problems + +## New v2.1.0 Features + +### Bootstrap Command + +Intent v2.1.0 introduces the `bootstrap` command for quick project initialization: + +```bash +# Bootstrap a new project with Intent +intent bootstrap "My New Project" +``` + +This command: +- Creates the Intent directory structure +- Initializes configuration +- Sets up initial steel thread +- Configures Backlog integration if available + +### Doctor Command + +The new `doctor` command helps diagnose configuration and environment issues: + +```bash +# Check Intent configuration and environment +intent doctor +``` + +This command checks: +- Intent installation integrity +- Configuration file validity +- Environment variable setup +- Directory permissions +- Backlog integration status +- Git configuration + +### Enhanced Directory Structure + +Intent v2.1.0 uses a simplified directory structure: + +``` +intent/ +├── st/ # Steel threads (each in its own directory) +├── docs/ # Technical documentation +├── llm/ # LLM-specific guidelines +├── usr/ # User documentation +└── eng/ # Engineering resources + +.intent/ +└── config.json # Project configuration (JSON format) +``` + +### JSON Configuration + +Intent v2.1.0 migrates from INI-style configuration to JSON: + +```json +{ + "project_name": "My Project", + "author": "Jane Doe", + "created_date": "2025-07-17", + "intent_version": "2.1.0", + "st_prefix": "ST", + "next_st_number": 1 +} +``` + +### Steel Thread Organization + +Each steel thread now has its own directory with standardized files: + +``` +intent/st/ST0001/ +├── info.md # Metadata and overview (required) +├── design.md # Design documentation (optional) +├── impl.md # Implementation details (optional) +└── tasks.md # Task breakdown (optional) +``` diff --git a/stp/usr/reference_guide.md b/intent/usr/reference_guide.md similarity index 57% rename from stp/usr/reference_guide.md rename to intent/usr/reference_guide.md index ed9ebb3..d25cb5c 100644 --- 
a/stp/usr/reference_guide.md +++ b/intent/usr/reference_guide.md @@ -1,12 +1,10 @@ --- -verblock: "09 Jul 2025:v0.4: Matthew Sinclair - Updated llm command with --symlink option" -stp_version: 1.2.0 +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Updated for Intent v2.1.0" +intent_version: 2.1.0 --- # Reference Guide -# Reference Guide - -This reference guide provides comprehensive information about the Steel Thread Process (STP) system. Unlike the task-oriented User Guide, this reference guide serves as a complete reference for all aspects of the system. +This reference guide provides comprehensive information about the Intent system (v2.1.0). Unlike the task-oriented User Guide, this reference guide serves as a complete reference for all aspects of the system. ## Table of Contents @@ -22,100 +20,124 @@ This reference guide provides comprehensive information about the Steel Thread P ### Core Commands -#### `stp upgrade` +#### `intent upgrade` -Upgrades STP files to the latest format. +Upgrades a project from an older version to Intent v2.1.0. 
**Usage:** ```bash -stp upgrade [--force] [--organize] +intent upgrade [--backup-dir <dir>] [--no-backup] ``` **Options:** -- `--force`: Force upgrade even for major version differences -- `--organize`: Organize steel thread files into status subdirectories after upgrade +- `--backup-dir <dir>`: Custom backup directory (default: .intent-backup-TIMESTAMP) +- `--no-backup`: Skip backup creation (not recommended) **Example:** ```bash -# Basic upgrade (updates metadata only) -stp upgrade +intent upgrade +``` -# Force upgrade with major version differences -stp upgrade --force +**Output:** + +- Creates timestamped backup of existing structure +- Migrates directory structure (legacy paths → intent/*) +- Converts YAML configuration to JSON +- Updates all file references and frontmatter +- Converts single steel thread files to directory structure +- Reports upgrade status for each step -# Upgrade and organize files by status -stp upgrade --organize +Example output: -# Force upgrade and organize -stp upgrade --force --organize ``` +Starting upgrade to Intent v2.1.0... -**Output:** +Detected version: 1.2.1 +Creating backup at .intent-backup-20250717-123456... +Backup completed. 
-- Updates all STP files with the latest format and metadata -- Adds or updates STP version information in YAML frontmatter -- Adds or updates missing metadata fields -- Adds section markers to steel_threads.md for sync -- **v1.2.0 → v1.2.1**: Migrates steel threads from single files to directory structure - - Creates directories for each steel thread (e.g., `ST0001/`) - - Splits content into separate files: `info.md`, `design.md`, `impl.md`, `tasks.md`, `results.md` - - Backs up original files to `.stp_backup/1.2.1/` -- If `--organize` is used, moves steel thread directories to status subdirectories: - - `COMPLETED/` for completed steel threads - - `NOT-STARTED/` for not started steel threads - - `CANCELLED/` for cancelled steel threads - - Directories with "In Progress" or "On Hold" status remain in the main directory -- Reports upgrade status for each file processed +Migrating directory structure... + legacy/prj → intent + legacy/eng → intent/eng + legacy/usr → intent/usr + legacy/llm → intent/llm -Example output: +Converting configuration... + legacy config (YAML) → .intent/config.json (JSON) + +Migrating steel threads... + Converting ST0001.md → ST0001/info.md + Converting ST0002.md → ST0002/info.md +Updating file references... + Updated 15 files with Intent naming + +Upgrade complete! Intent v2.1.0 is ready. +Run 'intent doctor' to verify configuration. ``` -Starting STP upgrade process... -Current STP version: 1.2.1 -Detected steel thread files in old format (single files). -Migration to directory structure is required for v1.2.1. +#### `intent bootstrap` -Migrate steel threads to directory structure? (Y/n) Y +Bootstraps Intent environment and dependencies. -Migrating steel threads to directory structure (v1.2.0 → v1.2.1) +**Usage:** -Found 14 steel thread files to migrate +```bash +intent bootstrap [--check] +``` - Migrating ST0001... - Created: info.md, design.md, impl.md, tasks.md - Migrating ST0002... 
- Created: info.md, design.md, impl.md +**Options:** -Scanning for STP files to upgrade... -Checking steel_threads.md... -Added section markers to stp/prj/st/steel_threads.md -Upgrading steel thread files... -Processing stp/prj/st/ST0001.md (current version: 0.0.0) - Missing Status field in file - Missing Created field in file - Updating file to add missing metadata fields... -Updated: stp/prj/st/ST0001.md -Processing stp/prj/st/ST0002.md (current version: 1.0.0) - Already at latest version, no update needed. +- `--check`: Only check requirements without installing -Running sync to update steel_threads.md... -Updated steel threads index file: stp/prj/st/steel_threads.md +**Example:** -STP upgrade complete. +```bash +intent bootstrap ``` -#### `stp init` +**Output:** + +- Checks system requirements +- Installs missing dependencies +- Configures shell environment +- Validates Intent installation -Initializes a new STP project. +#### `intent doctor` + +Checks and diagnoses Intent configuration and environment. **Usage:** ```bash -stp init <project_name> [directory] +intent doctor +``` + +**Example:** + +```bash +intent doctor +``` + +**Output:** + +- Validates Intent installation +- Checks project configuration +- Verifies directory structure +- Reports any issues found +- Suggests fixes for common problems + +#### `intent init` + +Initializes a new Intent project. 
+ +**Usage:** + +```bash +intent init <project_name> [directory] ``` **Parameters:** @@ -126,51 +148,37 @@ stp init <project_name> [directory] **Example:** ```bash -stp init "My Project" ./my-project +intent init "My Project" ./my-project ``` **Output:** -- Creates STP directory structure -- Initializes template documents -- Creates initial configuration +- Creates Intent directory structure +- Creates `.intent/config.json` with project configuration +- Initializes `intent/` directories (st/, eng/, usr/, llm/) +- Creates `CLAUDE.md` with project instructions +- Creates `intent/wip.md` for work tracking -#### `stp st` +#### `intent st` Manages steel threads. **Usage:** ```bash -stp st <command> [options] [arguments] -``` - -**Steel Thread Structure (v1.2.1+):** - -Starting with STP v1.2.1, steel threads are organized as directories rather than single files: - -``` -stp/prj/st/ -├── ST0001/ -│ ├── info.md # Main information (metadata, objective, context) -│ ├── design.md # Design decisions and approach -│ ├── impl.md # Implementation details -│ ├── tasks.md # Task tracking -│ └── results.md # Results and outcomes -└── ST0002/ - └── info.md # Minimum required file +intent st <command> [options] [arguments] ``` **Subcommands:** -`stp st new` +`intent st new` -Creates a new steel thread directory with template files. +Creates a new steel thread. 
**Usage:** ```bash -stp st new <title> +intent st new <title> ``` **Parameters:** @@ -180,22 +188,24 @@ stp st new <title> **Example:** ```bash -stp st new "Implement User Authentication" +intent st new "Implement User Authentication" ``` **Output:** -- Creates directory `ST####/` with the next available number -- Populates `info.md` with metadata and template sections -- Creates empty `design.md`, `impl.md`, `tasks.md`, and `results.md` files -`stp st done` +- Creates directory `intent/st/ST####/` +- Creates `info.md` with metadata and template +- Auto-increments thread ID +- Reports: "Created new steel thread: ST####" + +`intent st done` Marks a steel thread as complete. **Usage:** ```bash -stp st done <id> +intent st done <id> ``` **Parameters:** @@ -205,28 +215,28 @@ stp st done <id> **Example:** ```bash -stp st done ST0001 +intent st done ST0001 ``` -`stp st list` +`intent st list` Lists all steel threads. **Usage:** ```bash -stp st list [--status <status>] [--width <columns>] +intent st list [--status <status>] [--width <columns>] ``` **Options:** -- `--status`: Filter by status (optional) +- `--status`: Filter by status ("Not Started", "In Progress", "Completed") - `--width`: Set the output table width in columns (optional, defaults to terminal width) **Example:** ```bash -stp st list --status "In Progress" --width 100 +intent st list --status "In Progress" --width 100 ``` **Output:** @@ -239,149 +249,139 @@ ST0002 | Design Database Schema | In Progress | 2025-03-07 | ST0001 | Project Setup | Completed | 2025-03-05 | 2025-03-06 ``` -`stp st sync` +`intent st show` -Synchronizes the steel_threads.md document with individual steel thread files. +Displays the contents of a steel thread. 
**Usage:** ```bash -stp st sync [--write] [--width <columns>] +intent st show <id> [file] ``` -**Options:** +**Parameters:** -- `--write`: Update the steel_threads.md file (optional, without this flag output is sent to stdout) -- `--width`: Set the output table width in columns (optional, defaults to terminal width) +- `id`: ID of the steel thread (required) +- `file`: Specific file to show (optional: info, design, impl, tasks, results) **Example:** ```bash -stp st sync --write --width 100 -``` - -**Output:** - -Updates the steel_threads.md file with the current status of all steel thread files, preserving content outside the marked section: - -```markdown -# Steel Threads - -This document serves as an index of all steel threads in the project. - -## Index - -<!-- BEGIN: STEEL_THREAD_INDEX --> -| ID | Title | Status | Created | Completed | -|-------------------------|--------------------------------------|-------------|------------|-------------| -| [ST0003](./ST0003.md) | Implement Feature X | In Progress | 2025-03-08 | | -| [ST0002](./ST0002.md) | Design Database Schema | In Progress | 2025-03-07 | | -| [ST0001](./ST0001.md) | Project Setup | Completed | 2025-03-05 | 2025-03-06 | -<!-- END: STEEL_THREAD_INDEX --> - -## Notes - -Additional notes about steel threads can be added here. +intent st show ST0001 +intent st show ST0001 design ``` -`stp st show` +`intent st edit` -Shows details of a specific steel thread. +Opens a steel thread file for editing. 
**Usage:** ```bash -stp st show <id> [file] +intent st edit <id> [file] ``` **Parameters:** - `id`: ID of the steel thread (required) -- `file`: Specific file to show (optional, defaults to 'info') - - `info`: Main information file (default) - - `design`: Design decisions and approach - - `impl`: Implementation details - - `tasks`: Task tracking - - `results`: Results and outcomes - - `all`: Show all files combined +- `file`: Specific file to edit (optional: info, design, impl, tasks, results) -**Examples:** +**Example:** ```bash -stp st show ST0001 # Shows info.md -stp st show ST0001 design # Shows design.md -stp st show ST0001 all # Shows all files +intent st edit ST0001 +intent st edit ST0001 tasks ``` -`stp st edit` +#### `intent st repair` -Opens a steel thread file in your default editor. +Repairs malformed steel thread metadata. **Usage:** ```bash -stp st edit <id> [file] +intent st repair [id] [--write] ``` +**Purpose:** + +Fixes common metadata issues in steel threads that may occur after migrations or manual edits: +- Repairs malformed YAML frontmatter (e.g., escaped newlines) +- Updates legacy field names (stp_version → intent_version) +- Reconciles conflicting status values between frontmatter and body +- Validates and fixes date formats +- Adds missing required fields with sensible defaults + **Parameters:** -- `id`: ID of the steel thread (required) -- `file`: Specific file to edit (optional, defaults to 'info') - - `info`: Main information file (default) - - `design`: Design decisions and approach - - `impl`: Implementation details - - `tasks`: Task tracking - - `results`: Results and outcomes +- `id`: ID of specific steel thread to repair (optional) +- `--write`: Apply repairs (without this flag, performs dry-run) -**Examples:** +**Options:** -```bash -stp st edit ST0001 # Edits info.md -stp st edit ST0001 impl # Edits impl.md -``` +- Without `--write`: Shows what would be changed (dry-run mode) +- With `--write`: Actually performs the repairs and 
organizes files -**Note:** If the specified file doesn't exist, it will be created. +**Examples:** -`stp st organize` +```bash +# Dry-run repair on all steel threads +intent st repair -Organizes steel thread directories by status. +# Actually repair all steel threads +intent st repair --write -**Usage:** +# Dry-run repair on specific steel thread +intent st repair ST0001 -```bash -stp st organize [--write] +# Actually repair specific steel thread +intent st repair ST0001 --write ``` -**Options:** +**Expected Output (dry-run):** -- `--write`: Actually move directories (optional, without this flag it shows what would be done) +``` +Processing: ST0001 + - Found malformed frontmatter + Would fix malformed frontmatter + - Found legacy stp_version field + Would update to intent_version + - Found conflicting status: + Frontmatter: Not Started + Body: Completed + Would update frontmatter status to: Completed -**Effect:** +Dry run complete. Use --write to apply changes. +``` -Moves steel thread directories to subdirectories based on their status: -- `COMPLETED/` - For completed steel threads -- `NOT-STARTED/` - For not-started steel threads -- `CANCELLED/` - For cancelled steel threads -- Main directory - For in-progress and on-hold steel threads +**Expected Output (with --write):** -**Example:** +``` +Processing: ST0001 + - Found malformed frontmatter + Fixed malformed frontmatter + - Found legacy stp_version field + Updated to intent_version + - Found conflicting status: + Frontmatter: Not Started + Body: Completed + Updated frontmatter status to: Completed -```bash -# Preview what would be done -stp st organize +Repairs complete. -# Actually organize the directories -stp st organize --write +Running organize to ensure correct file locations... +Moved ST0001 to intent/st/COMPLETED +Updated steel threads index. ``` -#### `stp help` +#### `intent help` Displays help information. 
**Usage:** ```bash -stp help [command] +intent help [command] ``` **Parameters:** @@ -391,52 +391,53 @@ stp help [command] **Example:** ```bash -stp help st +intent help st +intent help task ``` -#### `stp llm` +#### `intent llm` Commands for LLM integration and assistance. **Usage:** ```bash -stp llm <subcommand> [options] +intent llm <subcommand> [options] ``` **Purpose:** -Provides utilities for working with Large Language Models (LLMs) in the context of STP. Helps LLMs understand how to use STP effectively and facilitates better collaboration between developers and AI assistants. +Provides utilities for working with Large Language Models (LLMs) in the context of Intent. Helps LLMs understand how to use Intent effectively and facilitates better collaboration between developers and AI assistants. **Subcommands:** -`stp llm usage_rules` +`intent llm usage_rules` -Displays the complete STP usage patterns and workflows documentation. +Displays the complete Intent usage patterns and workflows documentation. 
**Usage:** ```bash -stp llm usage_rules +intent llm usage_rules ``` **Example:** ```bash # Display usage rules -stp llm usage_rules +intent llm usage_rules # Create symlink in current directory -stp llm usage_rules --symlink +intent llm usage_rules --symlink # Create symlink in specific directory -stp llm usage_rules --symlink ~/my-project +intent llm usage_rules --symlink ~/my-project # Pipe to less for easier reading -stp llm usage_rules | less +intent llm usage_rules | less # Save to a file -stp llm usage_rules > stp-usage-rules.md +intent llm usage_rules > intent-usage-rules.md ``` **Options:** @@ -445,36 +446,35 @@ stp llm usage_rules > stp-usage-rules.md **Notes:** -- The usage rules document is located at `stp/eng/usage-rules.md` +- The usage rules document is located at `intent/llm/usage-rules.md` - It follows the pattern established by the Elixir Hex package 'usage_rules' -- The document can be regenerated using the prompt at `stp/eng/prompts/regenerate_usage_rules.md` - The --symlink option creates a symlink named 'usage-rules.md' for integration with other tools -#### `stp bl` / `stp backlog` +#### `intent bl` / `intent backlog` -STP wrapper for Backlog.md task management. +Intent wrapper for Backlog.md task management. **Usage:** ```bash -stp bl <command> [options] [arguments] -stp backlog <command> [options] [arguments] +intent bl <command> [options] [arguments] +intent backlog <command> [options] [arguments] ``` **Purpose:** -Provides a streamlined interface to Backlog.md that avoids common issues like git fetch errors and provides shortcuts for STP workflows. +Provides a streamlined interface to Backlog.md that avoids common issues like git fetch errors and provides shortcuts for Intent workflows. Respects the `backlog_list_status` configuration setting. **Subcommands:** -`stp bl init` +`intent bl init` -Initializes Backlog with STP-friendly settings. +Initializes Backlog with Intent-friendly settings. 
**Usage:** ```bash -stp bl init +intent bl init ``` **Effect:** @@ -483,14 +483,14 @@ stp bl init - Disables remote operations to prevent git errors - Sets default status to "To Do" -`stp bl create` +`intent bl create` Creates a task linked to a steel thread. **Usage:** ```bash -stp bl create <ST####> <title> +intent bl create <ST####> <title> ``` **Parameters:** @@ -501,67 +501,67 @@ stp bl create <ST####> <title> **Example:** ```bash -stp bl create ST0014 "Add validation logic" +intent bl create ST0014 "Add validation logic" ``` -`stp bl list` +`intent bl list` Lists all tasks without git fetch errors. **Usage:** ```bash -stp bl list +intent bl list ``` **Note:** Automatically adds `--plain` flag to prevent git operations. -`stp bl board` +`intent bl board` Displays tasks in Kanban board view. **Usage:** ```bash -stp bl board +intent bl board ``` -`stp bl task` +`intent bl task` Manages individual tasks. **Usage:** ```bash -stp bl task <subcommand> [options] +intent bl task <subcommand> [options] ``` **Example:** ```bash -stp bl task edit task-5 --status Done +intent bl task edit task-5 --status Done ``` -#### `stp task` +#### `intent task` Manages Backlog tasks linked to steel threads. **Usage:** ```bash -stp task <command> [options] [arguments] +intent task <command> [options] [arguments] ``` **Subcommands:** -`stp task create` +`intent task create` Creates a new task linked to a steel thread. **Usage:** ```bash -stp task create <ST####> <title> +intent task create <ST####> <title> ``` **Parameters:** @@ -572,17 +572,17 @@ stp task create <ST####> <title> **Example:** ```bash -stp task create ST0014 "Implement error handling" +intent task create ST0014 "Implement error handling" ``` -`stp task list` +`intent task list` Lists all tasks for a specific steel thread. 
**Usage:** ```bash -stp task list <ST####> +intent task list <ST####> ``` **Parameters:** @@ -592,7 +592,7 @@ stp task list <ST####> **Example:** ```bash -stp task list ST0014 +intent task list ST0014 ``` **Output:** @@ -604,40 +604,40 @@ task-1 [done] ST0014 - Create directory structure task-2 [todo] ST0014 - Add unit tests ``` -`stp task sync` +`intent task sync` Synchronizes task status with steel thread. **Usage:** ```bash -stp task sync <ST####> +intent task sync <ST####> ``` **Parameters:** - `ST####`: Steel thread ID (required) -#### `stp status` +#### `intent status` Synchronizes steel thread status based on Backlog task completion. **Usage:** ```bash -stp status <command> [options] [arguments] +intent status <command> [options] [arguments] ``` **Subcommands:** -`stp status show` +`intent status show` Displays status of steel thread and its tasks. **Usage:** ```bash -stp status show <ST####> +intent status show <ST####> ``` **Parameters:** @@ -659,14 +659,14 @@ Task Summary: Recommended Status: In Progress ``` -`stp status sync` +`intent status sync` Updates steel thread status based on task completion. **Usage:** ```bash -stp status sync <ST####> [--dry-run] +intent status sync <ST####> [--dry-run] ``` **Parameters:** @@ -677,24 +677,24 @@ stp status sync <ST####> [--dry-run] - `--dry-run`: Preview changes without updating -`stp status report` +`intent status report` Generates status report for all active threads. **Usage:** ```bash -stp status report +intent status report ``` -#### `stp migrate` +#### `intent migrate` Migrates embedded tasks from steel threads to Backlog. 
**Usage:** ```bash -stp migrate [options] <ST####> +intent migrate [options] <ST####> ``` **Parameters:** @@ -710,13 +710,13 @@ stp migrate [options] <ST####> ```bash # Migrate a single steel thread -stp migrate ST0014 +intent migrate ST0014 # Preview migration -stp migrate --dry-run ST0014 +intent migrate --dry-run ST0014 # Migrate all active threads -stp migrate --all-active +intent migrate --all-active ``` **Effect:** @@ -730,11 +730,11 @@ stp migrate --all-active #### Test Suite Commands -The STP test suite provides commands for verifying system functionality: +The Intent test suite provides commands for verifying system functionality: ```bash # Run all tests -cd stp/tests/ +cd intent/tests/ ./run_tests.sh # Run specific test suite @@ -764,15 +764,15 @@ The test environment setup script installs necessary dependencies, including: ### Steel Thread Document Format -Steel thread documents (located in `stp/prj/st/ST####.md`) use a standardized format with two ways to store metadata: +Steel thread documents (located in `intent/st/ST####/`) use a standardized format with two ways to store metadata: -#### STP Versioning +#### Intent Versioning -Each STP file includes version information to track compatibility: +Each Intent file includes version information to track compatibility: ```yaml --- -stp_version: 1.2.0 +intent_version: 2.1.0 --- ``` @@ -782,7 +782,7 @@ The version follows semantic versioning (MAJOR.MINOR.PATCH) where: - MINOR: New features in a backward-compatible manner - PATCH: Backward-compatible bug fixes -When running `stp upgrade`, the system checks this version to determine what upgrades are needed. +When running `intent upgrade`, the system checks this version to determine what upgrades are needed. 
#### YAML Frontmatter @@ -821,7 +821,7 @@ When using both formats, the document body metadata takes precedence over YAML f #### Section Markers in steel_threads.md -The steel_threads.md document uses HTML comment markers to identify sections that can be automatically updated by the `stp st sync` command: +The steel_threads.md document uses HTML comment markers to identify sections that can be automatically updated by the `intent st sync` command: ```markdown <!-- BEGIN: STEEL_THREAD_INDEX --> @@ -835,7 +835,7 @@ These markers should not be removed from the document, as they enable automatic #### Work in Progress (WIP) Template -Location: `stp/prj/wip.md` +Location: `intent/wip.md` Purpose: Tracks current development focus and active steel threads. @@ -848,7 +848,7 @@ Structure: #### Steel Thread Templates -Location: `stp/prj/st/` +Location: `intent/st/` Purpose: Defines and tracks individual units of work. @@ -864,7 +864,7 @@ Structure: ### Engineering Templates -Engineering templates are located in `stp/_templ/eng/`: +Engineering templates are located in `intent/_templ/eng/`: - `tpd/`: Technical Product Design templates - `_technical_product_design.md`: Main TPD template @@ -874,7 +874,7 @@ These templates provide structured formats for capturing technical design decisi ### User Documentation Templates -User documentation templates are located in `stp/_templ/usr/`: +User documentation templates are located in `intent/_templ/usr/`: - `_user_guide.md`: Template for task-oriented user instructions - `_reference_guide.md`: Template for comprehensive reference information @@ -882,22 +882,26 @@ User documentation templates are located in `stp/_templ/usr/`: ### LLM Templates -LLM-specific templates are located in `stp/_templ/llm/`: +LLM-specific templates are located in `intent/_templ/llm/`: - `_llm_preamble.md`: Template for creating context preambles for LLM sessions ## Directory Structure ``` -STP/ -├── stp/ # Main STP directory +Intent/ +├── intent/ # Main Intent 
directory │ ├── _templ/ # Templates directory -│ ├── prj/ # Project documentation -│ │ ├── st/ # Steel threads -│ │ │ ├── COMPLETED/ # Completed steel threads -│ │ │ ├── NOT-STARTED/ # Not started steel threads -│ │ │ └── CANCELLED/ # Cancelled steel threads -│ │ └── wip.md # Work in progress +│ ├── st/ # Steel threads +│ │ ├── ST####/ # Individual steel thread directories +│ │ │ ├── info.md # Steel thread metadata +│ │ │ ├── design.md # Design documentation +│ │ │ ├── impl.md # Implementation notes +│ │ │ └── tasks.md # Task tracking +│ │ ├── COMPLETED/ # Completed steel threads +│ │ ├── NOT-STARTED/ # Not started steel threads +│ │ └── CANCELLED/ # Cancelled steel threads +│ ├── wip.md # Work in progress │ ├── eng/ # Engineering docs │ │ └── tpd/ # Technical Product Design │ ├── usr/ # User documentation @@ -916,7 +920,9 @@ STP/ │ ├── lib/ # Test helper libraries │ ├── fixtures/ # Test fixtures │ └── run_tests.sh # Test runner script -├── bin/ # STP scripts (executable) +├── bin/ # Intent scripts (executable) +├── .intent/ # Intent configuration +│ └── config.json # Project configuration └── backlog/ # Backlog.md task management ├── tasks/ # Active tasks ├── drafts/ # Draft tasks @@ -930,24 +936,26 @@ STP/ | Variable | Purpose | Default | |-------------|------------------------------|-----------------------------------| -| STP_HOME | Location of STP installation | Path to cloned repository | -| STP_PROJECT | Current project name | Determined from initialization | -| STP_AUTHOR | Default author name | Determined from git configuration | -| STP_EDITOR | Preferred text editor | Determined from system defaults | +| INTENT_HOME | Location of Intent installation | Path to cloned repository | +| INTENT_PROJECT | Current project name | Determined from initialization | +| INTENT_AUTHOR | Default author name | Determined from git configuration | +| INTENT_EDITOR | Preferred text editor | Determined from system defaults | ### Project Configuration -Location: 
`stp/.config/config` +Location: `.intent/config.json` -Format: INI-style configuration file +Format: JSON configuration file Example: -```ini -# STP Project Configuration -PROJECT_NAME="Project Name" -AUTHOR="Default Author" -ST_PREFIX="ST" +```json +{ + "project_name": "Project Name", + "author": "Default Author", + "intent_version": "2.1.0", + "st_prefix": "ST" +} ``` ## Best Practices @@ -970,12 +978,12 @@ ST_PREFIX="ST" ### Task Management with Backlog -- Use `stp bl` wrapper instead of `backlog` directly to avoid git errors +- Use `intent bl` wrapper instead of `backlog` directly to avoid git errors - Create tasks linked to steel threads for traceability - Keep tasks granular (1-2 days of work) - Regularly sync steel thread status with task completion - Use task status values: "To Do", "In Progress", "Done" -- Migrate existing embedded tasks using `stp migrate` +- Migrate existing embedded tasks using `intent migrate` ### LLM Collaboration @@ -994,17 +1002,17 @@ ST_PREFIX="ST" | Context Window | The amount of text an LLM can process in a single interaction | | Canned Prompt | A pre-defined, reusable instruction template for an LLM | | WIP | Work in Progress, a document tracking current development focus | -| Backlog | Task management system integrated with STP for fine-grained work tracking | +| Backlog | Task management system integrated with Intent for fine-grained work tracking | | Task | Individual unit of work linked to a steel thread, tracked in Backlog | | Task Status | State of a task: "To Do", "In Progress", or "Done" | ## Backlog.md Integration -This section provides comprehensive documentation for the integration between STP (Steel Thread Process) and Backlog.md for enhanced task management. The integration maintains STP's strength in intent capture while leveraging Backlog.md's powerful task tracking capabilities. +This section provides comprehensive documentation for the integration between Intent and Backlog.md for enhanced task management. 
The integration maintains Intent's strength in intent capture while leveraging Backlog.md's powerful task tracking capabilities. ### Overview -The integration between STP and Backlog.md provides: +The integration between Intent and Backlog.md provides: - **Intent Capture**: Steel threads for high-level objectives and context - **Task Management**: Backlog for granular task tracking with rich metadata @@ -1013,11 +1021,11 @@ The integration between STP and Backlog.md provides: ### Architecture -#### STP Responsibilities +#### Intent Responsibilities - **Intent Capture**: High-level objectives and context in steel thread documents -- **Design Documentation**: Detailed design specifications (ST####_design.md) -- **Implementation Records**: As-built documentation (ST####_impl.md) +- **Design Documentation**: Detailed design specifications (ST####/design.md) +- **Implementation Records**: As-built documentation (ST####/impl.md) - **Process Coordination**: Overall workflow and steel thread lifecycle #### Backlog.md Responsibilities @@ -1027,22 +1035,27 @@ The integration between STP and Backlog.md provides: - **Task Organisation**: Labels, priorities, dependencies, and subtasks - **Visualisation**: Kanban board and browser interface -### Using the STP Backlog Wrapper +### Using the Intent Backlog Wrapper -STP provides a wrapper command `stp backlog` (or `stp bl` for short) that streamlines Backlog usage: +Intent provides a wrapper command `intent backlog` (or `intent bl` for short) that streamlines Backlog usage: ```bash -# Initialize backlog with STP-friendly settings -stp bl init +# Initialize backlog with Intent-friendly settings +intent bl init # List tasks without git fetch errors -stp bl list +intent bl list # Create tasks linked to steel threads -stp bl create ST0014 "Add validation" +intent bl create ST0014 "Add validation" # View Kanban board -stp bl board +intent bl board + +# Zero-pad task IDs retroactively +intent bl task pad task-9 --size 3 # Pad single task 
to task-009 +intent bl task pad --all --size 3 # Pad all tasks to 3 digits +intent bl task pad --all # Pad using configured size ``` The wrapper automatically: @@ -1051,6 +1064,32 @@ The wrapper automatically: - Disables remote operations for local projects - Provides shortcuts for common workflows +#### Task ID Padding + +The `intent bl task pad` command allows you to retroactively zero-pad task IDs for consistent sorting and display: + +```bash +# Pad a specific task +intent bl task pad task-9 --size 3 # Changes task-9 to task-009 + +# Pad all tasks to 3 digits +intent bl task pad --all --size 3 + +# Use configured padding (reads from zeroPaddedIds config) +intent bl task pad --all +``` + +This command: +- Updates both the task filename and the `id:` field in the YAML frontmatter +- Processes tasks in both `backlog/tasks/` and `backlog/archive/tasks/` +- Only pads tasks that need it (skips already padded tasks) +- Is idempotent - running it multiple times is safe + +After padding, ensure new tasks use the same padding by setting: +```bash +intent bl config set zeroPaddedIds 3 +``` + ### Naming Conventions #### Backlog Task Naming @@ -1071,7 +1110,7 @@ ST0014 - Add unit tests #### File Organisation -- Steel thread documents remain in `/stp/prj/st/` +- Steel thread documents remain in `/intent/st/` - Backlog tasks are stored in `/backlog/tasks/` - Task files are named: `task-<id> - <title>.md` @@ -1081,15 +1120,15 @@ ST0014 - Add unit tests ```bash # Create the steel thread -stp st new "My New Feature" +intent st new "My New Feature" # Returns: Created ST0015 # Create associated tasks using the backlog wrapper -stp bl create ST0015 "Design API structure" -stp bl create ST0015 "Implement core logic" -stp bl create ST0015 "Create registration flow" -stp bl create ST0015 "Add session management" -stp bl create ST0015 "Write integration tests" +intent bl create ST0015 "Design API structure" +intent bl create ST0015 "Implement core logic" +intent bl create ST0015 
"Create registration flow" +intent bl create ST0015 "Add session management" +intent bl create ST0015 "Write integration tests" ``` #### 2. Task Lifecycle @@ -1129,26 +1168,26 @@ Steel thread status is determined by task states: - **Completed**: All tasks done or archived - **Cancelled**: Manual designation with tasks archived -Use `stp status` to sync: +Use `intent status` to sync: ```bash -stp status sync ST0015 +intent status sync ST0015 ``` #### 4. Viewing Tasks ```bash # View all tasks for a steel thread -stp task list ST0015 +intent task list ST0015 # View all tasks without git errors -stp bl list +intent bl list # View in Kanban board -stp bl board +intent bl board # View in browser -stp bl browser +intent bl browser ``` ### Steel Thread Document Structure @@ -1158,7 +1197,7 @@ With Backlog integration, steel thread documents focus on intent and context: ```markdown --- verblock: "08 Jul 2025:v0.1: Author Name - Initial version" -stp_version: 1.2.0 +intent_version: 2.1.0 status: In Progress created: 20250708 completed: @@ -1175,7 +1214,7 @@ Background information and rationale Strategic approach and key decisions ## Tasks -Tasks are tracked in Backlog. View with: `stp task list ST0015` +Tasks are tracked in Backlog. View with: `intent task list ST0015` ## Implementation Notes Key technical decisions and learnings @@ -1190,10 +1229,10 @@ For existing steel threads with embedded task lists: ```bash # Migrate a specific steel thread -stp migrate ST0014 +intent migrate ST0014 # Migrate all active threads -stp migrate --all-active +intent migrate --all-active ``` This will: @@ -1227,7 +1266,7 @@ This will: #### Regular Maintenance -- Run `stp status sync` regularly +- Run `intent status sync` regularly - Archive completed tasks weekly - Review and promote drafts in planning sessions @@ -1237,54 +1276,54 @@ This will: ```bash # 1. 
Create steel thread for high-level planning -stp st new "Implement user authentication" +intent st new "Implement user authentication" # Output: Created ST0015 # 2. Create implementation tasks -stp task create ST0015 "Design auth database schema" -stp task create ST0015 "Implement login endpoint" -stp task create ST0015 "Create registration flow" -stp task create ST0015 "Add session management" -stp task create ST0015 "Write integration tests" +intent task create ST0015 "Design auth database schema" +intent task create ST0015 "Implement login endpoint" +intent task create ST0015 "Create registration flow" +intent task create ST0015 "Add session management" +intent task create ST0015 "Write integration tests" # 3. Work through tasks -stp bl board # View Kanban board +intent bl board # View Kanban board backlog task edit <id> --status in-progress # 4. Sync status back to steel thread -stp status sync ST0015 +intent status sync ST0015 ``` #### Research and Design ```bash # 1. Create steel thread for research -stp st new "Research caching strategies" +intent st new "Research caching strategies" # 2. Create investigation tasks -stp task create ST0016 "Review Redis capabilities" -stp task create ST0016 "Benchmark Memcached performance" -stp task create ST0016 "Evaluate in-memory options" -stp task create ST0016 "Document recommendations" +intent task create ST0016 "Review Redis capabilities" +intent task create ST0016 "Benchmark Memcached performance" +intent task create ST0016 "Evaluate in-memory options" +intent task create ST0016 "Document recommendations" # 3. Track progress -stp task list ST0016 +intent task list ST0016 ``` #### Bug Fix Workflow ```bash # 1. Create steel thread for bug -stp st new "Fix authentication timeout issue" +intent st new "Fix authentication timeout issue" # 2. 
Create diagnostic and fix tasks -stp task create ST0017 "Reproduce timeout issue" -stp task create ST0017 "Debug session handling" -stp task create ST0017 "Implement fix" -stp task create ST0017 "Add regression test" +intent task create ST0017 "Reproduce timeout issue" +intent task create ST0017 "Debug session handling" +intent task create ST0017 "Implement fix" +intent task create ST0017 "Add regression test" # 3. Fast status check -stp status show ST0017 +intent status show ST0017 ``` ### Troubleshooting @@ -1296,7 +1335,7 @@ stp status show ST0017 - Don't manually edit task IDs 2. **Status Mismatch** - - Run `stp status sync` to update + - Run `intent status sync` to update - Check for tasks in unexpected states 3. **Missing Tasks** @@ -1304,13 +1343,13 @@ stp status show ST0017 - Verify task wasn't archived 4. **Git Fetch Errors** - - Use `stp bl` wrapper instead of `backlog` directly + - Use `intent bl` wrapper instead of `backlog` directly - The wrapper adds `--plain` flag automatically #### Getting Help -- Run `stp help` for STP commands -- Run `stp help backlog` for STP's Backlog wrapper +- Run `intent help` for Intent commands +- Run `intent help backlog` for Intent's Backlog wrapper - Run `backlog help` for native Backlog commands ### Testing @@ -1319,7 +1358,7 @@ The integration includes comprehensive test coverage: ```bash # Run all integration tests -cd stp/tests +cd intent/tests ./run_tests.sh task ./run_tests.sh status ./run_tests.sh migrate @@ -1332,6 +1371,6 @@ bats migrate/migrate_test.bats Test files are located in: -- `stp/tests/task/task_test.bats` - Task command tests -- `stp/tests/status/status_test.bats` - Status command tests -- `stp/tests/migrate/migrate_test.bats` - Migration command tests +- `intent/tests/task/task_test.bats` - Task command tests +- `intent/tests/status/status_test.bats` - Status command tests +- `intent/tests/migrate/migrate_test.bats` - Migration command tests diff --git a/intent/usr/user_guide.md 
b/intent/usr/user_guide.md new file mode 100644 index 0000000..4204ef3 --- /dev/null +++ b/intent/usr/user_guide.md @@ -0,0 +1,677 @@ +--- +verblock: "27 Jul 2025:v2.1.0: Matthew Sinclair - Updated to Intent v2.1.0" +intent_version: 2.1.0 +--- +# User Guide + +This user guide provides task-oriented instructions for using the Intent system. It explains how to accomplish common tasks and provides workflow guidance. + +## Table of Contents + +1. [Introduction](#introduction) +2. [Installation](#installation) +3. [Getting Started](#getting-started) +4. [Working with Steel Threads](#working-with-steel-threads) +5. [Working with Backlog](#working-with-backlog) +6. [Documentation Management](#documentation-management) +7. [LLM Collaboration](#llm-collaboration) +8. [Agent Management](#agent-management) +9. [Testing](#testing) +10. [Troubleshooting](#troubleshooting) + +## Introduction + +Intent is a system designed to create a structured workflow and documentation process for developers working collaboratively with Large Language Models (LLMs). Intent provides templates, scripts, and process guidelines to enhance productivity while ensuring high-quality documentation as a byproduct of the development process. 
+ +### Purpose + +Intent helps developers: + +- Organize and track development work +- Create and maintain project documentation +- Collaborate effectively with LLMs +- Preserve context across development sessions + +### Core Concepts + +- **Steel Thread**: A self-contained unit of work focusing on a specific piece of functionality, organized as a directory with structured documentation files +- **Documentation Structure**: Organized markdown files capturing project information +- **LLM Collaboration**: Patterns for effective work with language models + +## Installation + +### Prerequisites + +- POSIX-compatible shell (bash, zsh) +- Git (optional, for version control) +- Text editor with markdown support +- Backlog.md (for task management integration) + +### Installation Steps + +1. **Global Installation**: + + ```bash + # Clone the Intent repository + git clone https://github.com/matthewsinclair/intent.git ~/intent + + # Add Intent bin directory to PATH + echo 'export INTENT_HOME=~/intent' >> ~/.bashrc + echo 'export PATH=$PATH:$INTENT_HOME/bin' >> ~/.bashrc + + # Reload shell configuration + source ~/.bashrc + ``` + +2. **Project-Specific Installation**: + + ```bash + # From your project directory + git clone https://github.com/matthewsinclair/intent.git .intent + + # Create a local alias for the project + alias intent='./.intent/bin/intent' + ``` + +## Getting Started + +### Initializing a Project + +To set up Intent in a new or existing project: + +```bash +# Navigate to project directory +cd my-project + +# Initialize Intent with default directories (eng, llm, st, usr) +intent init "Project Name" + +# Or specify which directories to include +intent init --dirs "eng,llm,st,usr" "Project Name" + +# Or include all directories (including bin, _templ, tests) +intent init --all "Project Name" + +# Initialize Backlog for task management +intent bl init +``` + +This creates the Intent directory structure with template documents and sets up Backlog for task management. 
+ +### Directory Structure + +After initialization with the default directories, you'll have this structure: + +``` +my-project/ +├── intent/ # Project documentation +│ ├── st/ # Steel threads (organized as directories) +│ │ └── ST0001/ # Example steel thread directory +│ │ ├── info.md # Steel thread metadata +│ │ ├── design.md # Design documentation +│ │ ├── impl.md # Implementation details +│ │ └── tasks.md # Task breakdown +│ ├── wip.md # Work in progress +│ ├── eng/ # Engineering docs +│ │ └── tpd/ # Technical Product Design +│ ├── usr/ # User documentation +│ └── llm/ # LLM-specific content +├── .intent/ # Configuration +│ └── config.json # Intent configuration +└── backlog/ # Backlog.md task management + ├── tasks/ # Active tasks + ├── drafts/ # Draft tasks + └── config.yml # Backlog configuration +``` + +If you use the `--all` option or include specific directories with `--dirs`, additional directories may be included: + +``` +my-project/ +└── intent/ + ├── bin/ # Intent scripts (only with --all or --dirs "bin") + ├── _templ/ # Templates (only with --all or --dirs "_templ") + └── tests/ # Tests (only with --all or --dirs "tests") +``` + +Note: Even when not copying bin files to the new project, Intent commands will still work because they execute from the centrally installed location. + +## Working with Steel Threads + +### Creating a Steel Thread + +To create a new steel thread: + +```bash +intent st new "Implement Feature X" +``` + +This creates a new steel thread directory (e.g., `intent/st/ST0001/`) with an `info.md` file containing metadata. 
+ +### Viewing Steel Threads + +To list all steel threads: + +```bash +# Basic list of all steel threads +intent st list + +# Filter by status +intent st list --status "In Progress" + +# Adjust table width (useful for wide terminals) +intent st list --width 120 +``` + +To view a specific steel thread: + +```bash +intent st show ST0001 +``` + +To edit a steel thread in your default editor: + +```bash +intent st edit ST0001 +``` + +### Synchronizing Steel Threads + +To update the steel threads index file with information from individual ST directories: + +```bash +# Preview changes without writing to file +intent st sync + +# Write changes to steel_threads.md +intent st sync --write + +# Adjust output width +intent st sync --write --width 120 +``` + +### Completing a Steel Thread + +When all tasks in a steel thread are done: + +```bash +intent st done ST0001 +``` + +This updates the status and completion date. + +## Working with Backlog + +Intent integrates with Backlog.md for fine-grained task management. The `intent bl` wrapper provides a streamlined interface that avoids common issues like git fetch errors. + +### Initializing Backlog + +To set up Backlog in your project: + +```bash +# Initialize Backlog with Intent-friendly settings +intent bl init +``` + +This configures Backlog for local use, disabling remote operations that can cause errors. 
+ +### Creating Tasks + +Tasks are linked to steel threads for traceability: + +```bash +# Create a task linked to a steel thread +intent bl create ST0001 "Implement user authentication" + +# Or use the task command +intent task create ST0001 "Add password validation" +``` + +### Listing Tasks + +View all tasks or filter by steel thread: + +```bash +# List all tasks (without git errors) +intent bl list + +# List tasks for a specific steel thread +intent task list ST0001 + +# View tasks in Kanban board +intent bl board +``` + +### Managing Task Status + +Update task status as work progresses: + +```bash +# Edit a task +intent bl task edit task-5 --status "In Progress" + +# Mark a task as done +intent bl task edit task-5 --status Done +``` + +### Synchronizing Status + +Keep steel thread status in sync with task completion: + +```bash +# View status summary +intent status show ST0001 + +# Sync steel thread status based on tasks +intent status sync ST0001 + +# Generate status report for all active threads +intent status report +``` + +### Migrating Existing Tasks + +If you have embedded tasks in steel threads, migrate them to Backlog: + +```bash +# Migrate tasks from a specific steel thread +intent migrate ST0001 + +# Preview migration without making changes +intent migrate --dry-run ST0001 + +# Migrate all active steel threads +intent migrate --all-active +``` + +### Managing Task ID Format + +Backlog can use zero-padded task IDs (e.g., task-001 instead of task-1) for better sorting. To retroactively update existing tasks: + +```bash +# Pad all tasks to 3 digits +intent bl task pad --all --size 3 + +# Or pad a specific task +intent bl task pad task-9 --size 3 + +# Use the configured padding size +intent bl task pad --all +``` + +After padding tasks, ensure new tasks use the same format: + +```bash +intent bl config set zeroPaddedIds 3 +``` + +### Best Practices + +1. **Use the wrapper**: Always use `intent bl` instead of `backlog` directly to avoid git errors +2. 
**Task naming**: Tasks are automatically named with the pattern "ST#### - Description" +3. **Regular syncing**: Run `intent status sync` to keep steel thread status current +4. **Consistent IDs**: Use zero-padded task IDs for better sorting and organization +5. **Task granularity**: Create tasks that can be completed in 1-2 days + +## Documentation Management + +Intent provides a structured approach to managing project documentation: + +### Updating Technical Product Design + +The technical product design document is the central reference for the project: + +```bash +# Open the TPD document +intent tpd +``` + +When making significant changes to the project, update the TPD to keep it in sync with the implementation. + +### Working with User Documentation + +User documentation is maintained in the `intent/usr/` directory: + +- `user_guide.md`: Task-oriented instructions for users +- `reference_guide.md`: Comprehensive reference information +- `deployment_guide.md`: Installation and deployment guidance + +Update these documents as features are added or changed. + +## LLM Collaboration + +Intent is designed for effective collaboration with Large Language Models like Claude: + +### Using the LLM Preamble + +The LLM preamble file contains context that should be shared with LLMs at the beginning of each session: + +```bash +# View the LLM preamble +cat intent/llm/llm_preamble.md +``` + +Include this preamble when starting new sessions with an LLM to provide essential context. 
+ +### Understanding Intent Usage Patterns + +Intent provides usage rules documentation specifically designed for LLMs: + +```bash +# Display usage patterns and workflows for LLMs +intent llm usage_rules + +# Create symlink for Elixir projects (or other tools expecting usage-rules.md) +intent llm usage_rules --symlink + +# Save to a file for reference +intent llm usage_rules > usage-rules.md +``` + +This document helps LLMs understand: +- How to use Intent commands effectively +- Common workflows and best practices +- Steel thread management patterns +- Task integration with Backlog.md + +### Contextualizing Work with Steel Threads + +When working with an LLM on a specific steel thread: + +```bash +# Share the steel thread document with the LLM +intent st show ST0001 | [send to LLM] +``` + +This provides the LLM with task-specific context for more effective collaboration. + +## Agent Management + +Intent v2.1.0 integrates with Claude Code sub-agents to provide specialized AI assistance that understands Intent methodology and your project conventions. + +### What are Intent Agents? + +Intent agents are Claude Code sub-agents - specialized AI assistants with focused knowledge: + +- **Intent Agent**: Understands steel threads, Intent commands, and project structure +- **Elixir Agent**: Elixir code doctor with Usage Rules and Ash/Phoenix patterns +- **Custom Agents**: Project-specific agents you can create + +### Setting Up Agents + +#### Initializing Agent Configuration + +Before installing agents, you need to initialize the agent configuration: + +```bash +# Initialize global agent configuration +intent agents init + +# Initialize project-specific agent configuration +intent agents init --project +``` + +This creates the necessary directories and manifest files for agent management. 
+ +#### Installing the Intent Agent + +```bash +# Check available agents +intent agents list + +# Install the Intent agent (recommended for all projects) +intent agents install intent + +# Install all available agents +intent agents install --all +``` + +#### Verifying Installation + +```bash +# Check agent status +intent agents status + +# Show agent details +intent agents show intent +``` + +### Managing Agents + +#### Keeping Agents Updated + +```bash +# Update agents with latest versions +intent agents sync + +# Check for modifications +intent agents status +``` + +#### Removing Agents + +```bash +# Remove specific agent +intent agents uninstall intent + +# Remove all Intent-managed agents +intent agents uninstall --all +``` + +### Using Agents with Claude + +Once installed, the Intent agent automatically provides Claude with: + +- Complete knowledge of Intent commands and methodology +- Understanding of steel thread structure and workflows +- Best practices for Intent project management +- Backlog.md integration patterns + +**Example: Claude with Intent Agent** + +``` +# Without Intent agent: +You: "Create a new feature for authentication" +Claude: "I'll help create authentication. What's your project structure?" +[You explain Intent, steel threads, etc.] + +# With Intent agent: +You: "Create a new feature for authentication" +Claude: "I'll create a steel thread for authentication: + + intent st new 'User Authentication System' + + This creates ST0042. Let me help document the intent + and break it into backlog tasks using Intent methodology..." 
+``` + +### Creating Custom Agents + +For project-specific conventions, create custom agents: + +```bash +# Create project agent directory +mkdir -p intent/agents/myproject + +# Create agent definition +cat > intent/agents/myproject/agent.md << 'EOF' +--- +name: myproject +description: Project-specific conventions and patterns +tools: Bash, Read, Write, Edit +--- + +You understand our specific project conventions: + +## Architecture +- API endpoints: /api/v2/{resource} +- Authentication: JWT Bearer tokens +- Database: PostgreSQL with migrations + +## Code Standards +- Test coverage: minimum 80% +- Documentation: JSDoc for all public APIs +- Git: conventional commits format +EOF + +# Install the custom agent +intent agents install myproject +``` + +### Agent Integration with Intent Commands + +Agents are automatically integrated with Intent's core workflow: + +- **intent init**: Detects Claude Code and offers agent installation +- **intent doctor**: Includes agent health checks +- **intent upgrade**: Preserves agent directories during migrations + +### Troubleshooting Agents + +#### Agent Not Found + +```bash +# Check if Claude Code is installed +which claude + +# Verify Claude agents directory exists +ls ~/.claude/agents/ +``` + +#### Agent Out of Sync + +```bash +# Check for local modifications +intent agents status + +# Sync with latest versions (overwrites local changes) +intent agents sync +``` + +#### Reinstalling Agents + +```bash +# Remove and reinstall +intent agents uninstall intent +intent agents install intent +``` + +## Testing + +Intent includes a comprehensive test suite to verify functionality: + +### Running Tests + +To run the test suite: + +```bash +# Run all tests +cd intent/tests/ +./run_tests.sh + +# Run specific test suite +./run_tests.sh bootstrap +``` + +### Test Structure + +Tests are organized by component: +- `bootstrap_test.bats`: Tests for bootstrap script +- `init_test.bats`: Tests for init command +- `st_test.bats`: Tests for steel 
thread commands +- `help_test.bats`: Tests for help system +- `main_test.bats`: Tests for main script + +## Upgrading Intent + +When new versions of Intent are released, you may need to upgrade your existing Intent projects to ensure compatibility with the latest features. + +### Running the Upgrade Command + +To upgrade all Intent files in your project to the latest format: + +```bash +intent upgrade +``` + +This command: +- Updates metadata in all Intent files +- Adds or updates JSON configuration +- Ensures files follow the current format standards +- Adds section markers for automatic sync + +### Forcing Upgrades + +For major version differences, the upgrade command will warn you before proceeding. To force the upgrade: + +```bash +intent upgrade --force +``` + +### After Upgrading + +After upgrading, it's a good practice to: + +1. Review updated files to ensure everything looks correct +2. Run a sync to update the steel threads index: + ```bash + intent st sync --write + ``` +3. Commit the changes if you're using version control + +## Troubleshooting + +### Common Issues + +#### Intent Commands Not Found + +If Intent commands are not found: + +```bash +# Check INTENT_HOME environment variable +echo $INTENT_HOME + +# Ensure Intent bin directory is in PATH +echo $PATH | grep intent + +# Fix PATH if needed +export PATH=$PATH:$INTENT_HOME/bin +``` + +#### Permission Issues + +If you encounter permission errors: + +```bash +# Make scripts executable +chmod +x $INTENT_HOME/bin/* +``` + +#### Template Generation Errors + +If template generation fails, check file permissions and ensure template files exist in the `_templ` directory. 
+ +#### Backlog Git Fetch Errors + +If you see git fetch errors when using Backlog: + +```bash +# Use the Intent wrapper instead +intent bl list # Instead of: backlog task list + +# Ensure remote operations are disabled +backlog config get remoteOperations +# Should return: false +``` + +#### Task Not Found + +If tasks aren't showing up: + +```bash +# Check task files exist +ls backlog/tasks/ + +# Use --plain flag if needed +backlog task list --plain +``` diff --git a/intent/wip.md b/intent/wip.md new file mode 100644 index 0000000..d6ccafc --- /dev/null +++ b/intent/wip.md @@ -0,0 +1,36 @@ +--- +verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" +stp_version: 1.2.0 +--- +# Work In Progress + +This file serves as a placeholder for kicking off new sessions. + +See the following files for detailed information about the project: + +- [Technical Product Design](../eng/tpd/technical_product_design.md) +- [Steel Threads Overview](st/steel_threads.md) + +Read CLAUDE.md then wait for instruction. + +#### Restart + +{{RESTART_PROMPT}} + +#### Todo + +{{TODO}} + +## Important Notes + +- Always refer to the tool as "Intent" not "STP" +- The methodology is still "Steel Thread Process" but the tool is "Intent" +- Check CLAUDE.md for project-specific instructions +- This is a fail-forward implementation - no rollback mechanisms + +## First Steps + + 1. Read the current TPD at `intent/eng/tpd/technical_product_design.md` + 2. Compare it with actual implementation in `bin/` directory + 3. Review test coverage and what's actually working + 4. Create a comprehensive update plan before making changes diff --git a/lib/help/agents.help.md b/lib/help/agents.help.md new file mode 100644 index 0000000..3b62745 --- /dev/null +++ b/lib/help/agents.help.md @@ -0,0 +1,150 @@ +@short: Manage Claude Code sub-agents for Intent projects + +# intent agents + +Manage Claude Code sub-agents for Intent projects. 
+ +## Synopsis + +``` +intent agents <command> [options] +``` + +## Description + +The Intent agent system integrates with Claude Code's sub-agent feature to provide specialized AI assistants that understand Intent's methodology and can help with specific development tasks. + +## Commands + +### list +List available and installed agents. + +``` +intent agents list +``` + +Shows all available agents (global and project-specific) with their installation status. + +### install +Install agent(s) to Claude configuration. + +``` +intent agents install <agent-name> [agent-name...] +intent agents install --all +intent agents install intent --force +``` + +Options: +- `--all` - Install all available agents +- `--force`, `-f` - Skip confirmation prompts + +### sync +Sync installed agents with latest versions. + +``` +intent agents sync [--force] +``` + +Updates installed agents while respecting local modifications. Use `--force` to overwrite local changes. + +### uninstall +Remove Intent-managed agents. + +``` +intent agents uninstall <agent-name> [agent-name...] +intent agents uninstall --all +``` + +Options: +- `--all` - Uninstall all Intent-managed agents +- `--force`, `-f` - Skip confirmation prompts + +### show +Display detailed agent information. + +``` +intent agents show <agent-name> +``` + +Shows metadata, installation status, and system prompt preview for a specific agent. + +### status +Check agent health and integrity. + +``` +intent agents status [--verbose] +``` + +Verifies installed agents against manifests, checks for modifications, and reports any issues. 
+ +Options: +- `--verbose`, `-v` - Show detailed information for each agent + +## Available Agents + +### intent +The Intent-aware development assistant that understands: +- Steel thread methodology +- Intent commands and structure +- Backlog task management +- Project organization + +### elixir +Elixir code doctor featuring: +- 19 Elixir best practices +- Usage Rules methodology +- Ash and Phoenix patterns +- Functional programming guidance + +## Examples + +```bash +# List all available agents +intent agents list + +# Install the Intent agent +intent agents install intent + +# Install all available agents +intent agents install --all + +# Check agent status +intent agents status + +# Update agents with available changes +intent agents sync + +# Show details about an agent +intent agents show elixir + +# Uninstall a specific agent +intent agents uninstall elixir +``` + +## Agent Locations + +- **Global agents**: `$INTENT_HOME/agents/` +- **Project agents**: `./intent/agents/` +- **Installed to**: `~/.claude/agents/` +- **Manifests**: `.manifest/` subdirectories + +## Creating Custom Agents + +To create a custom agent for your project: + +1. Create directory: `intent/agents/my-agent/` +2. Add `agent.md` with YAML frontmatter and system prompt +3. Add `metadata.json` with version and description +4. 
Install with: `intent agents install my-agent` + +## Troubleshooting + +- **"Claude Code not detected"**: Install Claude Code from https://claude.ai/download +- **"Agent file not found"**: Run `intent agents install` to restore missing agents +- **"Local changes detected"**: Your modifications are preserved; use `sync --force` to overwrite + +## See Also + +- `intent help` - General help +- `intent doctor` - Check system configuration +- Claude Code sub-agents documentation: https://docs.anthropic.com/en/docs/claude-code/sub-agents \ No newline at end of file diff --git a/lib/help/fileindex.help.md b/lib/help/fileindex.help.md new file mode 100644 index 0000000..2c96a7e --- /dev/null +++ b/lib/help/fileindex.help.md @@ -0,0 +1,137 @@ +@short: +Create and manage file indexes with checkbox states for tracking file processing + +@description: +The fileindex command creates indexes of files matching specified patterns, with support +for checkbox states to track which files have been processed or reviewed. It can work +both as a standalone tool and integrate with Intent projects for enhanced functionality. 
+ +@usage: +intent fileindex [OPTIONS] [STARTDIR] [FILESPEC] + +@options: +-r Recurse through subdirectories +-v Verbose mode (show processing details and summary) +-f FILE Output to file instead of stdout +--file FILE Output to file instead of stdout (alternative syntax) +-i FILE Use index file to maintain checked states +--index FILE Use index file to maintain checked states (alternative syntax) +-X FILE Toggle the checked state of FILE in the index +--toggle FILE Toggle the checked state of FILE in the index (alternative syntax) +-C FILE Set FILE to checked [x] state in the index +--check FILE Set FILE to checked [x] state in the index (alternative syntax) +-U FILE Set FILE to unchecked [ ] state in the index +--uncheck FILE Set FILE to unchecked [ ] state in the index (alternative syntax) +--index-dir DIR Specify default directory for index files +--intent-dir DIR Specify Intent project directory explicitly +--no-intent Disable Intent integration even if in a project +-h Show help message + +@arguments: +STARTDIR Directory to search in (defaults vary by context) +FILESPEC File pattern to match (e.g., "*.py", "*.{ex,exs}") + +@defaults: +When run within an Intent project: + STARTDIR: lib/ + FILESPEC: *.{ex,exs} + INDEX_DIR: .intent/indexes/ + +When run standalone: + STARTDIR: . (current directory) + FILESPEC: *.{ex,exs} + INDEX_DIR: . 
(current directory) + +@examples: +# List all Elixir files in the current directory +intent fileindex + +# Recursively list all Elixir files +intent fileindex -r + +# Create an index file for tracking progress +intent fileindex -r -i project.index + +# Search Python files in src/ directory +intent fileindex src "*.py" + +# Use verbose mode to see processing details +intent fileindex -rv + +# Output to a file instead of stdout +intent fileindex -r -f filelist.txt + +# Disable Intent integration in a project +intent fileindex --no-intent + +# Toggle a file's checked state (switches between [ ] and [x]) +intent fileindex -i project.index -X lib/myapp/user.ex +# Output shows new state: +# [x] lib/myapp/user.ex (if it was unchecked) +# [ ] lib/myapp/user.ex (if it was checked) + +# Check a specific file (set to [x]) +intent fileindex -i project.index -C lib/myapp/user.ex +# Output: [x] lib/myapp/user.ex + +# Uncheck a specific file (set to [ ]) +intent fileindex -i project.index -U lib/myapp/user.ex +# Output: [ ] lib/myapp/user.ex + +# Example workflow: process files one by one +# 1. Create initial index +intent fileindex -r -i review.index +# 2. Review first file and mark as checked +vim lib/myapp/user.ex +intent fileindex -i review.index -C lib/myapp/user.ex +# 3. Continue with next file... +vim lib/myapp/router.ex +intent fileindex -i review.index -C lib/myapp/router.ex +# 4. View current status +cat review.index | grep "^\[.\]" + +@index_file_format: +Index files contain: +1. A JSON configuration header with metadata +2. 
File entries in format: [x] filename or [ ] filename + - [ ] indicates unchecked/unprocessed + - [x] indicates checked/processed + +Example index file: +``` +{ + "generator": "intent-fileindex", + "version": "1.1", + "timestamp": "2024-01-15T10:30:00Z", + "context": "intent_project", + "config": { + "startdir": "lib", + "filespec": "*.{ex,exs}", + "recursive": true + } +} + +[ ] lib/myapp/application.ex +[x] lib/myapp/router.ex +[ ] lib/myapp/supervisor.ex +``` + +@features: +- Smart defaults based on context (Intent project vs standalone) +- Persistent checkbox states for tracking file processing +- JSON metadata header for reproducibility +- Handles file additions and removals automatically +- Verbose mode for debugging and progress tracking +- Flexible output options (stdout, file, or index) + +@notes: +- When using an index file, the tool preserves checkbox states across runs +- Files that no longer exist are automatically removed from the index +- New files are added with unchecked state +- The index file is updated atomically to prevent corruption +- Toggle mode requires an existing index file with the target file present +- Toggle output shows the new state of the file after toggling +- Check/uncheck modes require an existing index file with the target file present +- Check mode sets files to [x] state regardless of current state +- Uncheck mode sets files to [ ] state regardless of current state +- Check/uncheck operations are idempotent - checking an already checked file keeps it checked \ No newline at end of file diff --git a/stp/_templ/eng/tpd/_1_introduction.md b/lib/templates/eng/tpd/_1_introduction.md similarity index 100% rename from stp/_templ/eng/tpd/_1_introduction.md rename to lib/templates/eng/tpd/_1_introduction.md diff --git a/stp/_templ/eng/tpd/_2_requirements.md b/lib/templates/eng/tpd/_2_requirements.md similarity index 100% rename from stp/_templ/eng/tpd/_2_requirements.md rename to lib/templates/eng/tpd/_2_requirements.md diff --git 
a/stp/_templ/eng/tpd/_3_architecture.md b/lib/templates/eng/tpd/_3_architecture.md similarity index 100% rename from stp/_templ/eng/tpd/_3_architecture.md rename to lib/templates/eng/tpd/_3_architecture.md diff --git a/stp/_templ/eng/tpd/_4_detailed_design.md b/lib/templates/eng/tpd/_4_detailed_design.md similarity index 100% rename from stp/_templ/eng/tpd/_4_detailed_design.md rename to lib/templates/eng/tpd/_4_detailed_design.md diff --git a/stp/_templ/eng/tpd/_5_implementation_strategy.md b/lib/templates/eng/tpd/_5_implementation_strategy.md similarity index 100% rename from stp/_templ/eng/tpd/_5_implementation_strategy.md rename to lib/templates/eng/tpd/_5_implementation_strategy.md diff --git a/stp/_templ/eng/tpd/_6_deployment_and_operations.md b/lib/templates/eng/tpd/_6_deployment_and_operations.md similarity index 100% rename from stp/_templ/eng/tpd/_6_deployment_and_operations.md rename to lib/templates/eng/tpd/_6_deployment_and_operations.md diff --git a/stp/_templ/eng/tpd/_7_technical_challenges_and_mitigations.md b/lib/templates/eng/tpd/_7_technical_challenges_and_mitigations.md similarity index 100% rename from stp/_templ/eng/tpd/_7_technical_challenges_and_mitigations.md rename to lib/templates/eng/tpd/_7_technical_challenges_and_mitigations.md diff --git a/stp/_templ/eng/tpd/_8_appendices.md b/lib/templates/eng/tpd/_8_appendices.md similarity index 100% rename from stp/_templ/eng/tpd/_8_appendices.md rename to lib/templates/eng/tpd/_8_appendices.md diff --git a/stp/_templ/eng/tpd/_technical_product_design.md b/lib/templates/eng/tpd/_technical_product_design.md similarity index 100% rename from stp/_templ/eng/tpd/_technical_product_design.md rename to lib/templates/eng/tpd/_technical_product_design.md diff --git a/lib/templates/llm/_CLAUDE.md b/lib/templates/llm/_CLAUDE.md new file mode 100644 index 0000000..103fcca --- /dev/null +++ b/lib/templates/llm/_CLAUDE.md @@ -0,0 +1,99 @@ +# [[PROJECT_NAME]] Project Guidelines + +This is an Intent v2.2.0 
project. + +## Project Structure + +- `intent/` - Project artifacts (steel threads, docs, work tracking) + - `st/` - Steel threads organized as directories + - `docs/` - Technical documentation + - `llm/` - LLM-specific guidelines +- `backlog/` - Task management (if using Backlog.md) +- `.intent/` - Configuration and metadata + +## Steel Threads + +Steel threads are organized as directories under `intent/st/`: + +- Each steel thread has its own directory (e.g., ST0001/) +- Minimum required file is `info.md` with metadata +- Optional files: design.md, impl.md, tasks.md + +## Commands + +- `intent st new "Title"` - Create a new steel thread +- `intent st list` - List all steel threads +- `intent st show <id>` - Show steel thread details +- `intent agents init` - Initialize agent configuration +- `intent agents list` - List available agents +- `intent agents install <agent>` - Install an agent +- `intent doctor` - Check configuration +- `intent help` - Get help + +## Intent Agents + +This project has access to specialized AI agents through Intent's agent system. These agents are Claude Code sub-agents with domain-specific expertise. + +### Available Agents + +1. **intent** - Intent methodology specialist + - Steel thread management and best practices + - Backlog task tracking + - Intent command usage and workflows + - Project structure guidance + +2. 
**elixir** - Elixir code doctor + - Functional programming patterns + - Elixir Usage Rules and best practices + - Ash and Phoenix framework expertise + - Code review and optimization + +### Using Agents + +To delegate tasks to specialized agents, use the Task tool with the appropriate subagent_type: + +``` +Task( + description="Review Elixir code", + prompt="Review the authentication module for Usage Rules compliance", + subagent_type="elixir" +) +``` + +### When to Use Agents + +**Use the intent agent for:** + +- Creating or managing steel threads +- Understanding Intent project structure +- Working with backlog tasks +- Following Intent best practices + +**Use the elixir agent for:** + +- Writing idiomatic Elixir code +- Reviewing code for Usage Rules +- Ash/Phoenix implementation guidance +- Functional programming patterns + +**Use main Claude for:** + +- General programming tasks +- Cross-cutting concerns +- Integration between systems +- Tasks requiring broad context + +### Best Practices + +1. Delegate specialized tasks to appropriate agents +2. Provide clear, focused prompts to agents +3. Agents work best with specific, bounded tasks +4. Consider using multiple agents for complex workflows + +## Project-Specific Guidelines + +[[Add your project-specific guidelines here]] + +## Author + +[[Your name]] diff --git a/lib/templates/llm/_llm_preamble.md b/lib/templates/llm/_llm_preamble.md new file mode 100644 index 0000000..0a97e53 --- /dev/null +++ b/lib/templates/llm/_llm_preamble.md @@ -0,0 +1,138 @@ +--- +verblock: "05 Aug 2025:v0.3: Matthew Sinclair - Updated for Intent v2.2.0 with fileindex" +intent_version: 2.2.0 +--- +# LLM Preamble + +This document provides essential context for LLMs working on the [[PROJECT_NAME]] project. Share this document at the beginning of each LLM session to establish baseline understanding. 
+ +## Project Context + +[[PROJECT_NAME]] follows the Intent methodology (formerly Steel Thread Process), which organizes development into discrete "steel threads" - self-contained units of functionality that enable incremental progress with clear documentation. + +## Navigation Guide + +When working with this repository, focus on these key documents in order: + +1. **START HERE**: `CLAUDE.md` - Project-specific guidelines and instructions +2. **NEXT**: `intent/st/` - Review steel thread directories for project history +3. **THEN**: `Backlog.md` (if exists) - Current tasks and priorities +4. **REFERENCE**: `intent/docs/` - Technical documentation + +## Documentation Structure + +The Intent methodology organizes project information through this directory structure: + +- **intent/**: Project artifacts + - **intent/st/**: Steel thread directories (ST0001/, ST0002/, etc.) + - **intent/docs/**: Technical documentation + - **intent/llm/**: LLM-specific guidelines +- **backlog/**: Task management (if using Backlog.md) +- **.intent/**: Configuration and metadata + +## Steel Thread Process + +Work in this project is organized through steel threads: + +1. **Definition**: A steel thread is a self-contained unit of work representing a logical piece of functionality +2. **Structure**: Each steel thread has its own directory with: + - `info.md` - Metadata and overview (required) + - `design.md` - Design documentation (optional) + - `impl.md` - Implementation notes (optional) + - `tasks.md` - Task breakdown (optional) +3. **Management**: Steel threads are created and tracked using Intent commands + +## Intent Agent System + +This project can leverage specialized AI agents through Intent's agent system: + +### Available Agents + +1. **intent** - Intent methodology specialist + - Steel thread management and best practices + - Backlog task tracking + - Intent command usage and workflows + - Project structure guidance + +2. 
**elixir** - Elixir code doctor + - Functional programming patterns + - Elixir Usage Rules and best practices + - Ash and Phoenix framework expertise + - Code review and optimization + +### Using Agents + +Delegate tasks to specialized agents using the Task tool: + +``` +Task( + description="Short task description", + prompt="Detailed instructions for the agent", + subagent_type="agent_name" +) +``` + +### When to Use Agents + +**Use specialized agents when:** + +- Task requires deep domain knowledge +- Performing focused code reviews +- Following specific methodologies +- Task is well-bounded and focused + +**Use main Claude when:** + +- Task requires full project context +- Integrating multiple systems +- General programming tasks +- Exploratory work or debugging + +## Command Usage + +The Intent system provides these commands: + +- `intent st new "Title"` - Create a new steel thread +- `intent st list` - List all steel threads +- `intent st show <id>` - Show steel thread details +- `intent agents init` - Initialize agent configuration +- `intent agents list` - List available agents +- `intent agents install <agent>` - Install an agent +- `intent agents sync` - Update agents to latest versions +- `intent agents status` - Check agent health +- `intent doctor` - Check configuration +- `intent help` - Get help + +## Code Style and Conventions + +The following guidelines apply to this project: + +- **Indentation**: Use 2-space indentation in all programming languages +- **Documentation**: Update documentation alongside code changes +- **Naming**: Use descriptive variable and function names +- **Error Handling**: Implement robust error handling +- **Testing**: Include appropriate tests for new functionality +- **Markdown**: Maintain consistent formatting + +[[Add specific code style guidelines for your project's primary languages]] + +## How to Help + +When assisting with this project: + +1. Review CLAUDE.md for project-specific guidelines +2. 
Use specialized agents for domain-specific tasks +3. Maintain consistency with existing patterns +4. Update documentation alongside code changes +5. Track progress using Backlog.md if available +6. Create steel threads for new features or significant work + +## Project-Specific Information + +[[Add essential project-specific information here: + +- Key technologies used +- External dependencies +- Development setup instructions +- Architectural principles +- Known limitations or considerations]] diff --git a/stp/_templ/prj/_wip.md b/lib/templates/prj/_wip.md similarity index 100% rename from stp/_templ/prj/_wip.md rename to lib/templates/prj/_wip.md diff --git a/stp/_templ/prj/st/ST####/design.md b/lib/templates/prj/st/ST####/design.md similarity index 100% rename from stp/_templ/prj/st/ST####/design.md rename to lib/templates/prj/st/ST####/design.md diff --git a/stp/_templ/prj/st/ST####/impl.md b/lib/templates/prj/st/ST####/impl.md similarity index 100% rename from stp/_templ/prj/st/ST####/impl.md rename to lib/templates/prj/st/ST####/impl.md diff --git a/stp/_templ/prj/st/ST####/info.md b/lib/templates/prj/st/ST####/info.md similarity index 85% rename from stp/_templ/prj/st/ST####/info.md rename to lib/templates/prj/st/ST####/info.md index e6685f4..9100f12 100644 --- a/stp/_templ/prj/st/ST####/info.md +++ b/lib/templates/prj/st/ST####/info.md @@ -1,17 +1,12 @@ --- verblock: "[Date]:v0.1: [Author] - Initial version" -stp_version: 1.2.1 +intent_version: 2.2.0 status: Not Started created: YYYYMMDD completed: --- # ST####: [Title] -- **Status**: [Not Started|In Progress|Completed|On Hold|Cancelled] -- **Created**: YYYY-MM-DD -- **Completed**: YYYY-MM-DD -- **Author**: [Author Name] - ## Objective [Clear statement of what this steel thread aims to accomplish] diff --git a/stp/_templ/prj/st/ST####/tasks.md b/lib/templates/prj/st/ST####/tasks.md similarity index 100% rename from stp/_templ/prj/st/ST####/tasks.md rename to lib/templates/prj/st/ST####/tasks.md diff --git 
a/stp/_templ/prj/st/_steel_threads.md b/lib/templates/prj/st/_steel_threads.md similarity index 98% rename from stp/_templ/prj/st/_steel_threads.md rename to lib/templates/prj/st/_steel_threads.md index 8476ada..d838ddb 100644 --- a/stp/_templ/prj/st/_steel_threads.md +++ b/lib/templates/prj/st/_steel_threads.md @@ -38,4 +38,3 @@ The detailed information for each steel thread is contained in its directory (e. - design.md: Design decisions and approach - impl.md: Implementation details - tasks.md: Task tracking -- results.md: Results and outcomes diff --git a/stp/_templ/usr/_deployment_guide.md b/lib/templates/usr/_deployment_guide.md similarity index 97% rename from stp/_templ/usr/_deployment_guide.md rename to lib/templates/usr/_deployment_guide.md index 8ffdf4e..31ff465 100644 --- a/stp/_templ/usr/_deployment_guide.md +++ b/lib/templates/usr/_deployment_guide.md @@ -30,7 +30,7 @@ Install STP globally to make it available for all projects: ```bash # Clone the STP repository -git clone https://github.com/username/stp.git ~/stp +git clone https://github.com/matthewsinclair/stp.git ~/stp # Add STP bin directory to PATH in shell profile echo 'export STP_HOME=~/stp' >> ~/.bashrc @@ -46,7 +46,7 @@ Install STP within a specific project: ```bash # From your project directory -git clone https://github.com/username/stp.git .stp +git clone https://github.com/matthewsinclair/stp.git .stp # Create a local alias for the project alias stp='./.stp/bin/stp' diff --git a/stp/_templ/usr/_reference_guide.md b/lib/templates/usr/_reference_guide.md similarity index 100% rename from stp/_templ/usr/_reference_guide.md rename to lib/templates/usr/_reference_guide.md diff --git a/stp/_templ/usr/_user_guide.md b/lib/templates/usr/_user_guide.md similarity index 100% rename from stp/_templ/usr/_user_guide.md rename to lib/templates/usr/_user_guide.md diff --git a/stp/_templ/llm/_llm_preamble.md b/stp/_templ/llm/_llm_preamble.md deleted file mode 100644 index 2a9cbee..0000000 --- 
a/stp/_templ/llm/_llm_preamble.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" ---- -# LLM Preamble - -This document provides essential context for LLMs working on the [[PROJECT_NAME]] project. Share this document at the beginning of each LLM session to establish baseline understanding. - -## Project Context - -[[PROJECT_NAME]] follows the Steel Thread Process (STP) methodology, which organizes development into discrete "steel threads" - self-contained units of functionality that enable incremental progress with clear documentation. - -## Navigation Guide - -When working with this repository, you should focus on these key documents in this specific order: - -1. **START HERE**: `stp/eng/tpd/technical_product_design.md` - Contains comprehensive information about the project vision, architecture, and current state. - -2. **NEXT**: `stp/prj/st/steel_threads.md` - Provides a complete index of all steel threads with their status. Review this to understand what work has been completed and what remains. - -3. **THEN**: `stp/prj/wip.md` - Details the current work in progress and priorities. This is your guide to what should be worked on now. - -4. **FINALLY**: Use `stp bl list` and steel thread documents to review historical work completed. Backlog tasks provide detailed progress tracking. 
- -## Documentation Structure - -The STP methodology organizes project information through a specific directory structure: - -- **stp/prj/**: Project management documents - - **stp/prj/wip.md**: Current work in progress - - **Backlog tasks**: Historical record of project activities and progress - - **stp/prj/st/**: Steel thread documents and index -- **stp/eng/**: Engineering documentation - - **stp/eng/tpd/**: Technical Product Design documents -- **stp/usr/**: User documentation - - **stp/usr/user_guide.md**: End-user instructions - - **stp/usr/reference_guide.md**: Complete feature reference∏ - - **stp/usr/deployment_guide.md**: Deployment instructions -- **stp/llm/**: LLM-specific content - - **stp/llm/llm_preamble.md**: This document - -## Steel Thread Process - -Work in this project is organized through steel threads: - -1. **Definition**: A steel thread is a self-contained unit of work that represents a logical piece of functionality -2. **Workflow**: - - Steel threads start as "Not Started" - - When work begins, they move to "In Progress" - - When completed, they are marked as "Completed" - - They can also be "On Hold" or "Cancelled" as needed -3. **Documentation**: Each steel thread has its own markdown document in `stp/prj/st/` -4. 
**Management**: Steel threads are created, tracked, and completed using STP commands - -## Code Style and Conventions - -The following code style guidelines apply to this project: - -- **Indentation**: Use 2-space indentation in all programming languages -- **Documentation**: Add clear documentation for all code components -- **Naming**: Use descriptive variable and function names -- **Error Handling**: Implement robust error handling according to language best practices -- **Testing**: Include appropriate tests for all new functionality -- **Markdown**: Maintain consistent formatting in all markdown documents - -[Add specific code style guidelines for the project's primary programming languages] - -## Command Usage - -The STP system provides these commands for project management: - -- `stp init <project_name> [directory]`: Initialize STP in a project -- `stp st new <title>`: Create a new steel thread -- `stp st done <id>`: Mark a steel thread as complete -- `stp st list [--status <status>]`: List all steel threads -- `stp st show <id>`: Display details of a specific steel thread -- `stp st edit <id>`: Open a steel thread in your default editor -- `stp help [command]`: Display help information - -## How to Help - -When assisting with this project, you should: - -1. First, understand the current context by reviewing the documents in the order specified -2. Focus on the work in progress as defined in `stp/prj/wip.md` -3. Maintain consistency with existing patterns and documentation standards -4. Update documentation alongside code changes -5. Use the steel thread model to organize new work -6. 
Update task status in Backlog to track progress - -[Add any project-specific collaboration guidelines here] - -## Project-Specific Information - -[Add essential project-specific information here that doesn't fit elsewhere, such as: - -- Key technologies used -- External dependencies -- Special development setup instructions -- Important architectural principles -- Known limitations or considerations] diff --git a/stp/_templ/prj/st/ST####/results.md b/stp/_templ/prj/st/ST####/results.md deleted file mode 100644 index 39fd457..0000000 --- a/stp/_templ/prj/st/ST####/results.md +++ /dev/null @@ -1,21 +0,0 @@ -# Results - ST####: [Title] - -## Results - -[Summary of results after completion, including outcomes and any follow-up work needed] - -## Outcomes - -[Specific outcomes achieved] - -## Metrics - -[Any relevant metrics or measurements] - -## Lessons Learned - -[Key lessons learned during implementation] - -## Follow-up Work - -[Any follow-up work identified] \ No newline at end of file diff --git a/stp/bin/.help/backlog-install.help.md b/stp/bin/.help/backlog-install.help.md deleted file mode 100644 index b14d721..0000000 --- a/stp/bin/.help/backlog-install.help.md +++ /dev/null @@ -1,72 +0,0 @@ -# Backlog.md Installation Guide - -## Error: Backlog.md is not installed - -The STP Backlog integration requires Backlog.md to be installed on your system. - -## Installation Instructions - -### Method 1: Using npm (Recommended) - -```bash -npm install -g backlog-md -``` - -### Method 2: Using yarn - -```bash -yarn global add backlog-md -``` - -### Method 3: From Source - -1. Clone the repository: - ```bash - git clone https://github.com/MrLesk/Backlog.md.git - cd Backlog.md - ``` - -2. Install dependencies: - ```bash - npm install - ``` - -3. 
Link globally: - ```bash - npm link - ``` - -## Verify Installation - -After installation, verify that Backlog is available: - -```bash -backlog --version -``` - -## Initialize Backlog for STP - -Once installed, initialize Backlog with STP-friendly settings: - -```bash -stp bl init -``` - -## More Information - -- Backlog.md Repository: https://github.com/MrLesk/Backlog.md -- Backlog.md Documentation: https://github.com/MrLesk/Backlog.md#readme -- STP Integration Guide: Run `stp help backlog` for integration-specific help - -## Troubleshooting - -If you continue to see this error after installation: - -1. Ensure the installation directory is in your PATH -2. Try opening a new terminal session -3. Check that the `backlog` command is accessible: - ```bash - which backlog - ``` - -If you're using a non-standard shell or environment, you may need to manually add the npm global bin directory to your PATH. \ No newline at end of file diff --git a/stp/bin/.help/backlog.help.md b/stp/bin/.help/backlog.help.md deleted file mode 100644 index 60f73ff..0000000 --- a/stp/bin/.help/backlog.help.md +++ /dev/null @@ -1,51 +0,0 @@ -@short: -STP wrapper for Backlog.md task management - -@description: -The backlog command (also available as 'bl') provides a streamlined interface -to Backlog.md that's optimized for STP workflows. It automatically handles -common issues like git fetch errors and provides shortcuts for task creation. - -@usage: -stp backlog <command> [options] [arguments] -stp bl <command> [options] [arguments] - -@commands: -init Initialize backlog in current project -task <subcommand> Task management (create, list, edit, etc.) 
-list List all tasks (alias for 'task list --plain') -create <ST####> <title> Create a task linked to a steel thread -board Display tasks in Kanban board -config Manage backlog configuration -browser Open browser interface - -@features: -- Automatically adds --plain to list/board commands to prevent git errors -- Disables remote operations for local STP projects during init -- Provides shortcuts for common operations -- Maintains full backlog functionality - -@examples: -# Initialize backlog for your project -stp bl init - -# List all tasks without git fetch errors -stp bl list - -# Create a task linked to a steel thread -stp bl create ST0014 "Add validation logic" - -# Edit a specific task -stp bl task edit task-5 - -# View tasks in Kanban board -stp bl board - -# Open browser interface -stp bl browser - -@notes: -- This wrapper configures backlog for local use (no git operations) -- Use 'backlog' directly if you need remote git functionality -- Task status values: "To Do", "In Progress", "Done" -- For full backlog documentation, run: backlog help \ No newline at end of file diff --git a/stp/bin/.help/bl.help.md b/stp/bin/.help/bl.help.md deleted file mode 100644 index 0a2295f..0000000 --- a/stp/bin/.help/bl.help.md +++ /dev/null @@ -1,17 +0,0 @@ -@short: -Shorthand for 'stp backlog' command - -@description: -The 'bl' command is a convenient alias for 'stp backlog'. It provides all the -same functionality with less typing. - -@usage: -stp bl <command> [options] [arguments] - -@examples: -stp bl list # List all tasks -stp bl create ST0014 "Fix bug" # Create a task -stp bl board # View Kanban board - -@notes: -See 'stp help backlog' for full documentation. 
\ No newline at end of file diff --git a/stp/bin/.help/help.help.md b/stp/bin/.help/help.help.md deleted file mode 100644 index 94f9437..0000000 --- a/stp/bin/.help/help.help.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" ---- -# help - -@short: -Display help information for STP commands - -@desc: -The 'help' command provides detailed documentation for STP commands -and features. It can be used to display general usage information -or to get specific help for individual commands. - -When used without arguments, it shows a summary of all available commands. -When a command name is provided, it displays detailed help for that command. - -@usage: -stp help [command] - -Arguments: - command The name of the command to get help for (optional) - -Examples: - stp help # Display general help - stp help init # Show help for the 'init' command - stp help st # Show help for the 'st' command \ No newline at end of file diff --git a/stp/bin/.help/init.help.md b/stp/bin/.help/init.help.md deleted file mode 100644 index 55c8c4a..0000000 --- a/stp/bin/.help/init.help.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" ---- -# init - -@short: -Initialize STP in a project - -@desc: -The 'init' command sets up the Steel Thread Process (STP) structure in a -new or existing project. It creates the necessary directory structure, -initializes template documents, and configures the project for STP use. - -This command should be run once at the beginning of a project or when -adding STP to an existing project. 
- -@usage: -stp init [options] <project_name> [directory] - -Options: - -d, --dirs Comma-separated list of directories to copy (default: eng,llm,prj,usr) - -a, --all Copy all directories, including bin, _templ, tests - -Arguments: - project_name Name of the project (required) - directory Target directory (optional, defaults to current directory) - -The command creates the following directory structure by default: - prj/ # Project documentation - st/ # Steel threads - wip.md # Work in progress - eng/ # Engineering docs - tpd/ # Technical Product Design - usr/ # User documentation - llm/ # LLM-specific content - -When using the --all option or specifying with --dirs, additional directories may be included: - _templ/ # Templates (only with --all or --dirs "_templ") - bin/ # STP scripts (only with --all or --dirs "bin") - tests/ # Tests (only with --all or --dirs "tests") - -Examples: - stp init "My Project" # Initialize with default directories - stp init "My Project" ./my-project # Initialize in specified directory - stp init --dirs "eng,llm,prj,usr,bin" "My Project" # Specify which directories to include - stp init --all "My Project" # Include all directories diff --git a/stp/bin/.help/llm.help.md b/stp/bin/.help/llm.help.md deleted file mode 100644 index 69a4a54..0000000 --- a/stp/bin/.help/llm.help.md +++ /dev/null @@ -1,51 +0,0 @@ -@short: -Commands for LLM integration and assistance - -@description: -The llm command provides utilities for working with Large Language Models (LLMs) -in the context of STP. It includes tools to help LLMs understand how to use STP -effectively and to facilitate better collaboration between developers and AI assistants. - -@usage: -stp llm <subcommand> [options] - -@subcommands: -usage_rules Display the STP usage rules document - -@options: ---symlink [dir] Create a symlink to usage-rules.md (for usage_rules subcommand) - -@details: -The usage_rules subcommand displays the complete STP usage patterns and workflows -documentation. 
This document is designed specifically for LLMs to understand: - -- How to use STP commands effectively -- Common workflows and patterns -- Best practices for steel thread management -- Task integration with Backlog.md -- LLM collaboration patterns - -The usage rules follow the pattern established by the Elixir Hex package 'usage_rules' -and provide comprehensive guidance for AI-assisted development with STP. - -@examples: -# Display usage rules for LLMs -stp llm usage_rules - -# Create symlink in current directory -stp llm usage_rules --symlink - -# Create symlink in specific directory -stp llm usage_rules --symlink ~/my-project - -# Pipe to a pager for easier reading -stp llm usage_rules | less - -# Save to a file for reference -stp llm usage_rules > stp-usage-rules.md - -@notes: -- The usage rules document is located at stp/eng/usage-rules.md -- It can be regenerated using the prompt at stp/eng/prompts/regenerate_usage_rules.md -- LLMs should read this document to understand STP workflows -- The --symlink option creates a symlink named 'usage-rules.md' following the Hex package convention \ No newline at end of file diff --git a/stp/bin/.help/migrate.help.md b/stp/bin/.help/migrate.help.md deleted file mode 100644 index 8a830ef..0000000 --- a/stp/bin/.help/migrate.help.md +++ /dev/null @@ -1,34 +0,0 @@ -@short: -Migrate embedded tasks from steel threads to Backlog - -@description: -The migrate command helps transition from embedded task lists (checkboxes) -in steel thread documents to individual Backlog task files. This provides -better task management capabilities while preserving task state. 
- -@usage: -stp migrate [options] <ST####> - -@arguments: -ST#### Steel thread ID to migrate - -@options: ---all-active Migrate all active steel threads ---dry-run Show what would be migrated without creating tasks - -@examples: -# Migrate a single steel thread -stp migrate ST0014 - -# Preview migration without making changes -stp migrate --dry-run ST0014 - -# Migrate all active steel threads -stp migrate --all-active - -@notes: -- Extracts tasks from the ## Tasks section -- Preserves checkbox state (done/not done) -- Updates steel thread to reference Backlog tasks -- Creates task files in backlog/tasks directory -- Task IDs are assigned automatically by Backlog \ No newline at end of file diff --git a/stp/bin/.help/st.help.md b/stp/bin/.help/st.help.md deleted file mode 100644 index 9089723..0000000 --- a/stp/bin/.help/st.help.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.2: Matthew Sinclair - Updated for directory structure" ---- -# st - -@short: -Manage steel threads for the project - -@desc: -Steel threads are self-contained units of work that focus on implementing -specific pieces of functionality. The 'st' command helps create, manage, -and track steel threads throughout the development process. - -Starting with STP v1.2.1, steel threads are organized as directories containing -multiple files, allowing better separation of concerns and richer documentation. - -Steel threads provide a structured way to organize development tasks, -making it easier to collaborate with LLMs and track progress over time. 
- -@usage: -stp st <command> [options] [arguments] - -Commands: - new <title> Create a new steel thread directory - done <id> Mark a steel thread as complete - list [--status <status>] [--width N] List all steel threads - sync [--write] [--width N] Synchronize steel_threads.md with individual ST directories - show <id> [file] Show details of a specific steel thread file - edit <id> [file] Open a steel thread file in your default editor - organize [--write] Organize steel thread directories by status - -Options for 'list': - --status <status> Filter steel threads by status - Valid statuses: Not Started, In Progress, Completed, On Hold, Cancelled - --width N Set the output table width in columns (defaults to terminal width) - -Options for 'sync': - --write Update the steel_threads.md file (without this flag, output is sent to stdout) - --width N Set the output table width in columns (defaults to terminal width) - -Options for 'show' and 'edit': - file Specific file to show/edit (optional, defaults to 'info') - Valid files: info, design, impl, tasks, results, all - -Options for 'organize': - --write Actually move directories (without this flag, shows preview) - -Examples: - stp st new "Implement User Authentication" # Create a new steel thread directory - stp st done ST0001 # Mark ST0001 as complete - stp st list --status "In Progress" --width 100 # List all in-progress steel threads - stp st sync --write --width 100 # Update steel_threads.md with current ST state - stp st show ST0001 # Show info.md for ST0001 - stp st show ST0001 design # Show design.md for ST0001 - stp st show ST0001 all # Show all files for ST0001 - stp st edit ST0001 # Edit info.md for ST0001 - stp st edit ST0001 impl # Edit impl.md for ST0001 - stp st organize --write # Organize directories by status - -Steel Thread Structure (v1.2.1+): - Steel threads are organized as directories containing multiple files: - - ST####/ - ├── info.md # Main information (metadata, objective, context) - ├── 
design.md # Design decisions and approach - ├── impl.md # Implementation details - ├── tasks.md # Task tracking - └── results.md # Results and outcomes - - The info.md file contains the primary metadata: - --- - verblock: "Date:v0.1: Author - Description" - stp_version: 1.2.1 - status: In Progress - created: 20250307 - completed: - --- - - For full details on steel thread formats and migration, see the reference guide. diff --git a/stp/bin/.help/status.help.md b/stp/bin/.help/status.help.md deleted file mode 100644 index 217deb5..0000000 --- a/stp/bin/.help/status.help.md +++ /dev/null @@ -1,38 +0,0 @@ -@short: -Sync steel thread status based on Backlog task completion - -@description: -The status command helps maintain consistency between steel thread status -and the completion state of associated Backlog tasks. It can show current -status, suggest updates, and generate reports. - -@usage: -stp status <command> [options] [arguments] - -@commands: -show <ST####> Show status of steel thread and its tasks -sync <ST####> Update steel thread status based on tasks -report Generate status report for all active threads - -@options: ---dry-run Show what would be changed without updating - -@examples: -# Show current status -stp status show ST0014 - -# Sync steel thread status -stp status sync ST0014 - -# Preview changes without updating -stp status sync ST0014 --dry-run - -# Generate overall status report -stp status report - -@notes: -- Status is determined by task completion: - - Not Started: No tasks or all tasks in draft - - In Progress: At least one task todo or in-progress - - Completed: All tasks done -- Manual status overrides are preserved when appropriate \ No newline at end of file diff --git a/stp/bin/.help/stp.help.md b/stp/bin/.help/stp.help.md deleted file mode 100644 index b751360..0000000 --- a/stp/bin/.help/stp.help.md +++ /dev/null @@ -1,27 +0,0 @@ -# stp - -@short: -Steel Thread Process - A system for structured development and documentation with LLM 
collaboration - -@desc: -STP (Steel Thread Process) provides a structured process for developing software -in collaboration with Large Language Models (LLMs). It helps manage documentation, -track progress, and maintain context across development sessions. - -STP organizes work into "steel threads" - self-contained units of work that -focus on implementing specific pieces of functionality. It provides templates, -scripts, and process guidelines to enhance productivity while ensuring -high-quality documentation. - -@usage: -stp <command> [options] [arguments] - -Commands: - init Initialize STP in a project - st Manage steel threads - help Display help information - -Examples: - stp init "My Project" # Initialize STP in the current directory - stp st new "Implement Auth" # Create a new steel thread - stp help st # Display help for the 'st' command diff --git a/stp/bin/.help/task.help.md b/stp/bin/.help/task.help.md deleted file mode 100644 index 6e960ee..0000000 --- a/stp/bin/.help/task.help.md +++ /dev/null @@ -1,30 +0,0 @@ -@short: -Manage Backlog tasks linked to Steel Threads - -@description: -The task command integrates STP with Backlog.md for fine-grained task tracking. -It allows you to create, list, and synchronize tasks associated with steel threads. 
- -@usage: -stp task <command> [options] [arguments] - -@commands: -create <ST####> <title> Create a new task for a steel thread -list <ST####> List all tasks for a steel thread -sync <ST####> Sync task status with steel thread - -@examples: -# Create a new task -stp task create ST0014 "Update documentation" - -# List all tasks for a steel thread -stp task list ST0014 - -# Sync task status -stp task sync ST0014 - -@notes: -- Tasks are stored in the backlog/tasks directory -- Task names follow the pattern: ST#### - <description> -- Tasks have status: todo, in-progress, or done -- Use 'backlog' directly for advanced task management \ No newline at end of file diff --git a/stp/bin/.help/upgrade.help.md b/stp/bin/.help/upgrade.help.md deleted file mode 100644 index 964a335..0000000 --- a/stp/bin/.help/upgrade.help.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.2: Updated for v1.2.1 directory migration" ---- -# upgrade - -@short: -Upgrade STP files to the latest format - -@desc: -The upgrade command scans all STP files and brings them up to date with the latest version. -It adds or updates metadata and ensures all files follow the current format standards. - -The upgrade process includes: -- Adding STP version information to files -- Adding or updating YAML frontmatter metadata -- Adding section markers to steel_threads.md for sync -- Ensuring all files have the correct structure and format -- Migrating steel threads from single files to directories (v1.2.0 → v1.2.1) - -The command checks the version of each file and only upgrades files that need it. -For major version differences, a warning is displayed unless --force is used. 
- -@usage: -stp upgrade [--force] [--organize] - -Options: - --force Force upgrade even for major version differences - --organize Organize steel thread directories by status after upgrade - -@examples: -# Upgrade all STP files -stp upgrade - -# Force upgrade even for major version differences -stp upgrade --force - -@notes: -- The upgrade process doesn't remove any content from your files -- All files are backed up before modification -- After upgrading, run 'stp st sync' to update the steel_threads.md file -- The current STP version is stored in each file's frontmatter -- v1.2.1 migration: Converts ST####.md files to ST####/ directories - - Splits content into separate files (info.md, design.md, impl.md, etc.) - - Backs up original files to .stp_backup/1.2.1/ - - Preserves all content and metadata \ No newline at end of file diff --git a/stp/bin/bootstrap b/stp/bin/bootstrap deleted file mode 100755 index edb2c7c..0000000 --- a/stp/bin/bootstrap +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash -# bootstrap - Create STP directory structure and initial files -# -# This is a standalone script that does not require the stp command. -# It bootstraps the entire STP directory structure and creates initial files. 
-# -# Usage: ./bootstrap [author] - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Get author name (default to git config or current user) -AUTHOR="${1:-$(git config user.name 2>/dev/null || echo "$USER")}" -DATE="$(date '+%d %b %Y')" - -echo "Creating STP directory structure with author: $AUTHOR" - -# Create directory structure -mkdir -p {stp/{_templ/{prj/st,eng/tpd,usr,llm},bin/.help,prj/st,eng/tpd,usr,llm},bin} - -# Function to create a markdown file with propblock -create_md_file() { - local file="$1" - local title="$2" - - mkdir -p "$(dirname "$file")" - - cat > "$file" << EOF ---- -verblock: "$DATE:v0.1: $AUTHOR - Initial version" ---- -# $title -EOF - - echo "Created: $file" -} - -# Function to create a template markdown file with generic title -create_template_md_file() { - local file="$1" - local title="$2" - - mkdir -p "$(dirname "$file")" - - cat > "$file" << EOF ---- -verblock: "$DATE:v0.1: $AUTHOR - Initial version" ---- -# $title - -[Generic template content - replace with project-specific information] -EOF - - echo "Created template: $file" -} - -# Create template files -create_template_md_file "stp/_templ/prj/_wip.md" "Work In Progress Template" -create_template_md_file "stp/_templ/prj/_journal.md" "Journal Template" -create_template_md_file "stp/_templ/prj/st/_steel_threads.md" "Steel Threads Index Template" -create_template_md_file "stp/_templ/prj/st/_ST####.md" "Steel Thread Template" - -create_template_md_file "stp/_templ/eng/tpd/_technical_product_design.md" "Technical Product Design Template" -create_template_md_file "stp/_templ/eng/tpd/_1_introduction.md" "Introduction Template" -create_template_md_file "stp/_templ/eng/tpd/_2_requirements.md" "Requirements Template" -create_template_md_file "stp/_templ/eng/tpd/_3_architecture.md" "Architecture Template" -create_template_md_file "stp/_templ/eng/tpd/_4_detailed_design.md" "Detailed Design Template" -create_template_md_file 
"stp/_templ/eng/tpd/_5_implementation_strategy.md" "Implementation Strategy Template" -create_template_md_file "stp/_templ/eng/tpd/_6_deployment_and_operations.md" "Deployment and Operations Template" -create_template_md_file "stp/_templ/eng/tpd/_7_technical_challenges_and_mitigations.md" "Technical Challenges and Mitigations Template" -create_template_md_file "stp/_templ/eng/tpd/_8_appendices.md" "Appendices Template" - -create_template_md_file "stp/_templ/usr/_user_guide.md" "User Guide Template" -create_template_md_file "stp/_templ/usr/_reference_guide.md" "Reference Guide Template" -create_template_md_file "stp/_templ/usr/_deployment_guide.md" "Deployment Guide Template" - -create_template_md_file "stp/_templ/llm/_llm_preamble.md" "LLM Preamble Template" - -# Create project files -create_md_file "stp/prj/wip.md" "Work In Progress" -create_md_file "stp/prj/journal.md" "Project Journal" -create_md_file "stp/prj/st/steel_threads.md" "Steel Threads" -create_md_file "stp/prj/st/ST0001.md" "ST0001: Directory Structure" -create_md_file "stp/prj/st/ST0002.md" "ST0002: Core Script Framework" - -# Create engineering files -create_md_file "stp/eng/tpd/technical_product_design.md" "Technical Product Design" -create_md_file "stp/eng/tpd/1_introduction.md" "Introduction" -create_md_file "stp/eng/tpd/2_requirements.md" "Requirements" -create_md_file "stp/eng/tpd/3_architecture.md" "Architecture" -create_md_file "stp/eng/tpd/4_detailed_design.md" "Detailed Design" -create_md_file "stp/eng/tpd/5_implementation_strategy.md" "Implementation Strategy" -create_md_file "stp/eng/tpd/6_deployment_and_operations.md" "Deployment and Operations" -create_md_file "stp/eng/tpd/7_technical_challenges_and_mitigations.md" "Technical Challenges and Mitigations" -create_md_file "stp/eng/tpd/8_appendices.md" "Appendices" - -# Create user files -create_md_file "stp/usr/user_guide.md" "User Guide" -create_md_file "stp/usr/reference_guide.md" "Reference Guide" -create_md_file 
"stp/usr/deployment_guide.md" "Deployment Guide" - -# Create LLM files -create_md_file "stp/llm/llm_preamble.md" "LLM Preamble" - -# Create help files -create_md_file "stp/bin/.help/init.help.md" "init" -create_md_file "stp/bin/.help/st.help.md" "st" -create_md_file "stp/bin/.help/help.help.md" "help" - -# Create script files (just placeholders, not executable yet) -touch bin/stp -touch bin/stp_init -touch bin/stp_st -touch bin/stp_help -touch bin/bootstrap - -# Also create copies in stp/bin -touch stp/bin/stp -touch stp/bin/stp_init -touch stp/bin/stp_st -touch stp/bin/stp_help -touch stp/bin/bootstrap - -# Make script files executable -echo "You'll need to add content to the scripts and make them executable with: chmod +x bin/stp* stp/bin/stp*" - -echo "" -echo "STP bootstrap complete! Directory structure and placeholder files created." -echo "Next steps:" -echo "1. Add content to the template files in stp/_templ/" -echo "2. Implement the script files in bin/ and stp/bin/" -echo "3. Make script files executable with: chmod +x bin/stp* stp/bin/stp*" \ No newline at end of file diff --git a/stp/bin/fix_status.sh b/stp/bin/fix_status.sh deleted file mode 100755 index 5ccf970..0000000 --- a/stp/bin/fix_status.sh +++ /dev/null @@ -1,97 +0,0 @@ -#\!/bin/bash - -# Function to modify the update_file_frontmatter function in stp_upgrade -modify_upgrade_script() { - local file="stp/bin/stp_upgrade" - local temp_file="${file}.tmp" - - if [ -f "$file" ]; then - # Create a backup - cp "$file" "${file}.bak" - - # Modify the script - awk ' - /function update_file_frontmatter/,/^}/ { - if ($0 ~ /echo "status: \\$status" >> "\\$temp_file"/) { - print " # Check if there is already a status in the YAML frontmatter"; - print " original_status=$(grep -m 1 \"^status:\" \"$file\" | sed \"s/^status: *//\")"; - print " if [ -n \"$original_status\" ]; then"; - print " echo \"status: $original_status\" >> \"$temp_file\""; - print " else"; - print " echo \"status: $status\" >> 
\"$temp_file\""; - print " fi"; - next; - } - } - { print; } - ' "$file" > "$temp_file" - - # Check if the modification worked - if grep -q "original_status" "$temp_file"; then - mv "$temp_file" "$file" - chmod +x "$file" - echo "Successfully modified stp_upgrade script to preserve status" - else - echo "Failed to modify stp_upgrade script" - rm "$temp_file" - fi - else - echo "Error: stp_upgrade script not found" - fi -} - -# Function to update the organize_st.sh script to better check file status -update_organize_script() { - local file="stp/bin/organize_st.sh" - - # Add code to check both status formats (YAML frontmatter and document body) - sed -i.bak 's/status=$(grep -m 1 "^\\\- \\\*\\\*Status\\\*\\\*:" "$file" | sed "s\/^\\\- \\\*\\\*Status\\\*\\\*: \/\/" | sed '"'"'s\/^[[:space:]]*\/\/;s\/[[:space:]]*$\/\/'"'"')/# Try document body status first\ - status=$(grep -m 1 "^\\\- \\\*\\\*Status\\\*\\\*:" "$file" | sed "s\/^\\\- \\\*\\\*Status\\\*\\\*: \/\/" | sed '"'"'s\/^[[:space:]]*\/\/;s\/[[:space:]]*$\/\/'"'"')\ - \ - # If empty or not found, try YAML frontmatter\ - if [ -z "$status" ]; then\ - status=$(grep -m 1 "^status:" "$file" | sed "s\/^status: *\/\/")\ - fi/g' "$file" - - # Check if the modification worked - if grep -q "# Try document body status first" "$file"; then - echo "Successfully updated organize_st.sh script" - else - echo "Failed to update organize_st.sh script" - mv "${file}.bak" "$file" - fi -} - -# Move ST0013 back to the right location -move_st0013() { - if [ -f "stp/prj/st/NOT-STARTED/ST0013.md" ]; then - # Update status in the file - sed -i.bak 's/status: Not Started/status: In Progress/' "stp/prj/st/NOT-STARTED/ST0013.md" - sed -i.bak 's/\- \*\*Status\*\*: Not Started/\- \*\*Status\*\*: In Progress/' "stp/prj/st/NOT-STARTED/ST0013.md" - - # Move the file back to the main directory - mv "stp/prj/st/NOT-STARTED/ST0013.md" "stp/prj/st/ST0013.md" - echo "Moved ST0013 back to main directory" - elif [ -f "stp/prj/st/ST0013.md" ]; then - echo 
"ST0013 is already in the correct location" - else - echo "ST0013 file not found" - fi -} - -# Update the steel_threads.md index file -update_index() { - local file="stp/prj/st/steel_threads.md" - local temp_file="${file}.tmp" - - sed -i.bak 's/\[ST0013\](\.\/NOT-STARTED\/ST0013)/\[ST0013\](\.\/ST0013)/' "$file" 2>/dev/null || true - echo "Updated index file references" -} - -# Main execution -echo "Fixing ST0013 status and location..." -move_st0013 -update_index -#modify_upgrade_script -#update_organize_script -echo "Fixes completed\!" diff --git a/stp/bin/migrate_st_to_dirs b/stp/bin/migrate_st_to_dirs deleted file mode 100755 index 801ad94..0000000 --- a/stp/bin/migrate_st_to_dirs +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash -# migrate_st_to_dirs - Migrate steel threads from single files to directory structure -# This script is called by stp_upgrade when upgrading from 1.2.0 to 1.2.1 - -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to split a steel thread file into multiple files -split_steel_thread() { - local st_file="$1" - local st_dir="$2" - local st_id=$(basename "$st_file" .md) - - echo " Migrating $st_id..." 
- - # Create the directory - mkdir -p "$st_dir" - - # Create temporary files for each section - local temp_info=$(mktemp) - local temp_design=$(mktemp) - local temp_impl=$(mktemp) - local temp_tasks=$(mktemp) - local temp_results=$(mktemp) - - # Track which section we're in - local current_section="header" - local has_design=false - local has_impl=false - local has_tasks=false - local has_results=false - - # Read the file line by line - while IFS= read -r line; do - # Detect section headers - if [[ "$line" =~ ^##\ Approach ]]; then - current_section="approach" - has_design=true - echo "$line" >> "$temp_design" - elif [[ "$line" =~ ^##\ Tasks ]]; then - current_section="tasks" - has_tasks=true - echo "$line" >> "$temp_tasks" - elif [[ "$line" =~ ^##\ Implementation ]]; then - current_section="implementation" - has_impl=true - echo "$line" >> "$temp_impl" - elif [[ "$line" =~ ^##\ Results ]]; then - current_section="results" - has_results=true - echo "$line" >> "$temp_results" - elif [[ "$line" =~ ^##\ Related\ Steel\ Threads ]] || [[ "$line" =~ ^##\ Context\ for\ LLM ]]; then - current_section="footer" - echo "$line" >> "$temp_info" - elif [[ "$line" =~ ^##\ (Objective|Context) ]]; then - current_section="info" - echo "$line" >> "$temp_info" - else - # Write to appropriate file based on current section - case "$current_section" in - header|info|footer) - echo "$line" >> "$temp_info" - ;; - approach) - echo "$line" >> "$temp_design" - ;; - tasks) - echo "$line" >> "$temp_tasks" - ;; - implementation) - echo "$line" >> "$temp_impl" - ;; - results) - echo "$line" >> "$temp_results" - ;; - esac - fi - done < "$st_file" - - # Move temp files to final locations - mv "$temp_info" "$st_dir/info.md" - - # Only create files if they have content - if [ "$has_design" = true ] && [ -s "$temp_design" ]; then - # Add header to design.md - { - echo "# Design - $st_id: $(grep "^# $st_id:" "$st_file" | sed "s/^# $st_id: //")" - echo "" - cat "$temp_design" - } > "$st_dir/design.md" - 
fi - rm -f "$temp_design" - - if [ "$has_impl" = true ] && [ -s "$temp_impl" ]; then - # Add header to impl.md - { - echo "# Implementation - $st_id: $(grep "^# $st_id:" "$st_file" | sed "s/^# $st_id: //")" - echo "" - cat "$temp_impl" - } > "$st_dir/impl.md" - fi - rm -f "$temp_impl" - - if [ "$has_tasks" = true ] && [ -s "$temp_tasks" ]; then - # Add header to tasks.md - { - echo "# Tasks - $st_id: $(grep "^# $st_id:" "$st_file" | sed "s/^# $st_id: //")" - echo "" - cat "$temp_tasks" - } > "$st_dir/tasks.md" - fi - rm -f "$temp_tasks" - - if [ "$has_results" = true ] && [ -s "$temp_results" ]; then - # Add header to results.md - { - echo "# Results - $st_id: $(grep "^# $st_id:" "$st_file" | sed "s/^# $st_id: //")" - echo "" - cat "$temp_results" - } > "$st_dir/results.md" - fi - rm -f "$temp_results" - - # Update stp_version in info.md - sed -i.bak "s/^stp_version: .*$/stp_version: 1.2.1/" "$st_dir/info.md" - rm -f "$st_dir/info.md.bak" - - echo " Created: info.md$([ -f "$st_dir/design.md" ] && echo ", design.md")$([ -f "$st_dir/impl.md" ] && echo ", impl.md")$([ -f "$st_dir/tasks.md" ] && echo ", tasks.md")$([ -f "$st_dir/results.md" ] && echo ", results.md")" -} - -# Main migration process -echo "Migrating steel threads to directory structure (v1.2.0 → v1.2.1)" -echo "" - -# Backup directory -BACKUP_DIR=".stp_backup/1.2.1" -mkdir -p "$BACKUP_DIR" - -# Find all steel thread files -ST_BASE_DIR="stp/prj/st" -ST_FILES=$(find "$ST_BASE_DIR" -name "ST[0-9][0-9][0-9][0-9].md" -type f 2>/dev/null || true) - -if [ -z "$ST_FILES" ]; then - echo "No steel thread files found to migrate." 
- exit 0 -fi - -# Count files -FILE_COUNT=$(echo "$ST_FILES" | wc -l | tr -d ' ') -echo "Found $FILE_COUNT steel thread files to migrate" -echo "" - -# Process each file -for st_file in $ST_FILES; do - # Get the base name and directory - st_id=$(basename "$st_file" .md) - st_parent_dir=$(dirname "$st_file") - - # Determine target directory based on current location - if [[ "$st_parent_dir" == *"/COMPLETED" ]]; then - target_dir="$st_parent_dir/$st_id" - elif [[ "$st_parent_dir" == *"/NOT-STARTED" ]]; then - target_dir="$st_parent_dir/$st_id" - elif [[ "$st_parent_dir" == *"/CANCELLED" ]]; then - target_dir="$st_parent_dir/$st_id" - else - # In main directory - target_dir="$st_parent_dir/$st_id" - fi - - # Backup the original file - backup_file="$BACKUP_DIR/$(basename "$st_file")" - cp "$st_file" "$backup_file" - - # Split the file into directory structure - split_steel_thread "$st_file" "$target_dir" - - # Remove the original file - rm "$st_file" -done - -echo "" -echo "Migration complete!" -echo "Original files backed up to: $BACKUP_DIR" -echo "" -echo "Note: You may want to run 'stp st organize --write' to ensure all" -echo "steel threads are in the correct status directories." \ No newline at end of file diff --git a/stp/bin/organize_st.sh b/stp/bin/organize_st.sh deleted file mode 100755 index e5f2847..0000000 --- a/stp/bin/organize_st.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash -# Script to organize steel thread directories by status -# Updated for v1.2.1 directory structure - -# Create required directories -mkdir -p stp/prj/st/COMPLETED stp/prj/st/NOT-STARTED stp/prj/st/CANCELLED - -# Move files based on their status -echo "Organizing files based on status..." - -# Find all ST directories -for dir in stp/prj/st/ST*/; do - if [ -d "$dir" ]; then - # Extract ID from directory name - id=$(basename "$dir") - - # Look for status in info.md file - info_file="${dir}info.md" - if [ ! 
-f "$info_file" ]; then - echo "Warning: $id has no info.md file" - continue - fi - - # Check both YAML frontmatter and document body for status - yaml_status=$(grep -m 1 "^status:" "$info_file" | sed "s/^status: *//") - body_status=$(grep -m 1 "^\- \*\*Status\*\*:" "$info_file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter status - if [ -n "$yaml_status" ]; then - status="$yaml_status" - elif [ -n "$body_status" ]; then - status="$body_status" - else - status="Not Started" - fi - - echo "Directory: $id - Status: $status" - - # Move directory to appropriate location - case "$status" in - "Completed") - echo "Moving $id to COMPLETED" - mv "$dir" "stp/prj/st/COMPLETED/$id" - ;; - "Not Started") - echo "Moving $id to NOT-STARTED" - mv "$dir" "stp/prj/st/NOT-STARTED/$id" - ;; - "Cancelled") - echo "Moving $id to CANCELLED" - mv "$dir" "stp/prj/st/CANCELLED/$id" - ;; - *) - # In Progress or On Hold stay in the main directory - echo "$id stays in main directory" - ;; - esac - fi -done - -# Also check subdirectories to make sure directories are in the right place -for subdir in stp/prj/st/COMPLETED/ stp/prj/st/NOT-STARTED/ stp/prj/st/CANCELLED/; do - if [ ! -d "$subdir" ]; then - continue - fi - - subdir_name=$(basename "$subdir") - - # Find all ST directories in this subdirectory - for dir in "$subdir"ST*/; do - if [ -d "$dir" ]; then - # Extract ID from directory name - id=$(basename "$dir") - - # Look for status in info.md file - info_file="${dir}info.md" - if [ ! 
-f "$info_file" ]; then - echo "Warning: $id has no info.md file" - continue - fi - - # Check both YAML frontmatter and document body for status - yaml_status=$(grep -m 1 "^status:" "$info_file" | sed "s/^status: *//") - body_status=$(grep -m 1 "^\- \*\*Status\*\*:" "$info_file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter status - if [ -n "$yaml_status" ]; then - status="$yaml_status" - elif [ -n "$body_status" ]; then - status="$body_status" - else - status="Not Started" - fi - - # Determine the correct directory - target_dir="stp/prj/st" - case "$status" in - "Completed") - target_dir="stp/prj/st/COMPLETED" - ;; - "Not Started") - target_dir="stp/prj/st/NOT-STARTED" - ;; - "Cancelled") - target_dir="stp/prj/st/CANCELLED" - ;; - *) - target_dir="stp/prj/st" - ;; - esac - - # Move the directory if it's in the wrong location - if [ "$subdir_name" == "COMPLETED" ] && [ "$status" != "Completed" ]; then - echo "Moving $id from COMPLETED to $target_dir" - mv "$dir" "$target_dir/$id" - elif [ "$subdir_name" == "NOT-STARTED" ] && [ "$status" != "Not Started" ]; then - echo "Moving $id from NOT-STARTED to $target_dir" - mv "$dir" "$target_dir/$id" - elif [ "$subdir_name" == "CANCELLED" ] && [ "$status" != "Cancelled" ]; then - echo "Moving $id from CANCELLED to $target_dir" - mv "$dir" "$target_dir/$id" - fi - fi - done -done - -echo "Organization complete!" 
diff --git a/stp/bin/stp b/stp/bin/stp deleted file mode 100755 index 7160d09..0000000 --- a/stp/bin/stp +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -# stp - Steel Thread Process main script -# Copyright (c) 2024 Matthew Sinclair -# Licensed under the MIT License (see LICENSE file) -# Usage: stp <command> [options] [arguments] - -# Version -STP_VERSION="1.2.0" - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Determine STP_HOME if not set -if [ -z "$STP_HOME" ]; then - # First try to determine from the script location - SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - if [ -d "$SCRIPT_DIR/../_templ" ]; then - # We're likely in the bin directory of an STP installation - export STP_HOME="$(cd "$SCRIPT_DIR/.." && pwd)" - elif [ -d "$SCRIPT_DIR/../../stp/_templ" ]; then - # We're in the bin directory under stp - export STP_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" - else - # Try to find STP in common locations - for dir in ~/stp ~/.stp /usr/local/stp; do - if [ -d "$dir" ]; then - export STP_HOME="$dir" - break - fi - done - fi - - # If still not found, error out - if [ -z "$STP_HOME" ]; then - error "Could not determine STP_HOME. Please set it manually." - fi -fi - -# Check if bin directory exists -if [ ! -d "$STP_HOME/stp/bin" ] && [ ! 
-d "$STP_HOME/bin" ]; then - error "Invalid STP_HOME: bin directory not found at $STP_HOME/stp/bin or $STP_HOME/bin" -fi - -# Determine bin directory location -if [ -d "$STP_HOME/stp/bin" ]; then - BIN_DIR="$STP_HOME/stp/bin" -else - BIN_DIR="$STP_HOME/bin" -fi - -# Display help if no arguments provided -if [ $# -eq 0 ]; then - exec "$BIN_DIR/stp_help" -fi - -# Get the command -COMMAND="$1" -shift - -# Handle version flag -if [ "$COMMAND" = "--version" ] || [ "$COMMAND" = "-v" ]; then - echo "STP version $STP_VERSION" - exit 0 -fi - -# Handle help command specially -if [ "$COMMAND" = "help" ]; then - exec "$BIN_DIR/stp_help" "$@" -fi - -# Check if command script exists -COMMAND_SCRIPT="$BIN_DIR/stp_$COMMAND" -if [ ! -f "$COMMAND_SCRIPT" ]; then - error "Unknown command '$COMMAND'. Run 'stp help' for usage information." -fi - -# Check if script is executable -if [ ! -x "$COMMAND_SCRIPT" ]; then - echo "Warning: Making script executable: $COMMAND_SCRIPT" >&2 - chmod +x "$COMMAND_SCRIPT" -fi - -# Execute command with remaining arguments -exec "$COMMAND_SCRIPT" "$@" \ No newline at end of file diff --git a/stp/bin/stp_backlog b/stp/bin/stp_backlog deleted file mode 100755 index 9d6b5c6..0000000 --- a/stp/bin/stp_backlog +++ /dev/null @@ -1,166 +0,0 @@ -#!/bin/bash -# stp_backlog - Wrapper for Backlog.md to provide tighter STP integration -# Usage: stp backlog <command> [options] [arguments] -# Also available as: stp bl <command> [options] [arguments] - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Get STP_HOME from environment or determine from script location -if [ -z "$STP_HOME" ]; then - SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - export STP_HOME="$(cd "$SCRIPT_DIR/../.." && pwd)" -fi - -# Check if backlog is installed -if ! 
command -v backlog &> /dev/null; then - echo "Error: Backlog.md is not installed" >&2 - echo "" >&2 - cat "$STP_HOME/stp/bin/.help/backlog-install.help.md" >&2 - exit 1 -fi - -# Function to display usage -usage() { - echo "Usage: stp backlog <command> [options] [arguments]" - echo " stp bl <command> [options] [arguments]" - echo "" - echo "STP wrapper for Backlog.md task management" - echo "" - echo "Commands:" - echo " init Initialize backlog in current project" - echo " task <subcommand> Task management (create, list, edit, etc.)" - echo " list List all tasks (alias for 'task list --plain')" - echo " create <ST####> <title> Create a task linked to a steel thread" - echo " board Display tasks in Kanban board" - echo " config Manage backlog configuration" - echo " browser Open browser interface" - echo "" - echo "This wrapper:" - echo " - Automatically uses --plain for list commands to avoid git errors" - echo " - Disables remote operations for local projects" - echo " - Provides shortcuts for common STP workflows" - echo "" - echo "Examples:" - echo " stp bl list # List all tasks without git fetch" - echo " stp bl create ST0014 \"Fix bug\" # Create task linked to ST0014" - echo " stp bl task edit task-5 # Edit a specific task" - echo "" - echo "For full backlog documentation, run: backlog help" -} - -# Initialize backlog with STP-friendly defaults -init_backlog() { - # Run backlog init - backlog init "$@" - - # Configure for local use - if [ -f "backlog/config.yml" ]; then - echo "Configuring backlog for STP integration..." - # Disable remote operations to prevent git fetch errors - backlog config set remoteOperations false >/dev/null 2>&1 || true - # Set default status to match STP conventions - backlog config set defaultStatus "To Do" >/dev/null 2>&1 || true - echo "Backlog configured for local STP use." 
- fi -} - -# Create a task with STP conventions -create_task() { - local st_id="$1" - local title="$2" - - if [ -z "$st_id" ] || [ -z "$title" ]; then - error "Usage: stp bl create <ST####> <title>" - fi - - # Validate steel thread ID format - if ! echo "$st_id" | grep -qE '^ST[0-9]{4}$'; then - error "Invalid steel thread ID format. Expected: ST####" - fi - - # Create the task with full title - local full_title="$st_id - $title" - backlog task create "$full_title" -} - -# Process commands -case "${1:-}" in - init) - shift - init_backlog "$@" - ;; - - list) - # Shortcut for 'task list --plain' - shift - # Add --plain if not already present - if ! echo "$@" | grep -q -- "--plain"; then - backlog task list --plain "$@" - else - backlog task list "$@" - fi - ;; - - create) - # Special STP create command - shift - create_task "$@" - ;; - - board) - shift - # Pass through board command without modification - # Note: board doesn't support --plain option - backlog board "$@" - ;; - - task|tasks) - # Handle task subcommands - subcommand="${2:-}" - case "$subcommand" in - list) - # Add --plain to task list - shift 2 - if ! 
echo "$@" | grep -q -- "--plain"; then - backlog task list --plain "$@" - else - backlog task list "$@" - fi - ;; - *) - # Pass through other task commands - shift - backlog task "$@" - ;; - esac - ;; - - config) - # Pass through config commands - shift - backlog config "$@" - ;; - - browser) - # Pass through browser command - shift - backlog browser "$@" - ;; - - -h|--help|help|"") - usage - exit 0 - ;; - - *) - # Pass through any other commands - backlog "$@" - ;; -esac \ No newline at end of file diff --git a/stp/bin/stp_bl b/stp/bin/stp_bl deleted file mode 100755 index bc40f55..0000000 --- a/stp/bin/stp_bl +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# stp_bl - Shorthand alias for stp_backlog -# This simply calls stp_backlog with all arguments - -# Get the directory where this script is located -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Call stp_backlog with all arguments -exec "$SCRIPT_DIR/stp_backlog" "$@" \ No newline at end of file diff --git a/stp/bin/stp_help b/stp/bin/stp_help deleted file mode 100755 index cdcb2c8..0000000 --- a/stp/bin/stp_help +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# stp_help - Display help for STP commands -# Usage: stp_help [command] - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Check if STP_HOME is set -if [ -z "$STP_HOME" ]; then - error "STP_HOME environment variable is not set" -fi - -# Display command-specific help -if [ $# -eq 1 ]; then - COMMAND="$1" - HELP_FILE="$STP_HOME/stp/bin/.help/$COMMAND.help.md" - - if [ -f "$HELP_FILE" ]; then - # Display help file - cat "$HELP_FILE" - else - # Check if command exists but doesn't have help - COMMAND_SCRIPT="$STP_HOME/stp/bin/stp_$COMMAND" - if [ -f "$COMMAND_SCRIPT" ]; then - echo "No help available for command '$COMMAND'" - echo "" - echo "Usage information may be available by running:" - echo " $COMMAND_SCRIPT --help" - else - error "Unknown command '$COMMAND'" - fi - fi - exit 0 
-fi - -# Display general help -cat << EOF -STP - Steel Thread Process - -A system for structured development and documentation with LLM collaboration. - -Usage: stp <command> [options] [arguments] - -Available commands: -EOF - -# Find all stp_* commands in stp/bin directory -for script in "$STP_HOME"/stp/bin/stp_*; do - if [ -f "$script" ]; then - # Extract command name from script name (remove stp_ prefix) - cmd_name=$(basename "$script" | sed 's/^stp_//') - - # Construct help file path - help_file="$STP_HOME/stp/bin/.help/$cmd_name.help.md" - - # Get short description from help file if it exists - if [ -f "$help_file" ]; then - # Extract text between @short: and the next section (@) - # Properly handle multiline descriptions by joining them with spaces - short_desc=$(awk '/^@short:/{flag=1; next} /^@/{if(flag){flag=0}} flag' "$help_file" | \ - awk '{$1=$1}1' | \ - tr '\n' ' ' | \ - sed 's/^ *//; s/ *$//') - printf " %-6s %s\n" "$cmd_name" "$short_desc" - else - printf " %-6s %s\n" "$cmd_name" "No description available" - fi - fi -done - -cat << EOF - -For more information on a specific command, run: - stp help <command> - -For complete documentation, see: - stp/usr/user_guide.md - Task-based guide for users - stp/usr/reference_guide.md - Complete reference for all features -EOF \ No newline at end of file diff --git a/stp/bin/stp_init b/stp/bin/stp_init deleted file mode 100755 index d454f80..0000000 --- a/stp/bin/stp_init +++ /dev/null @@ -1,253 +0,0 @@ -#!/bin/bash -# stp_init - Initialize STP in a project -# Usage: stp_init [options] <project_name> [directory] - -# Exit on error -set -e - -# Default directories to copy -DEFAULT_DIRS_TO_COPY="eng,llm,prj,usr" - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to display usage information -usage() { - echo "Usage: stp init [options] <project_name> [directory]" - echo "" - echo "Initialize Steel Thread Process in the specified directory" - echo "" - echo "Options:" 
- echo " -d, --dirs Comma-separated list of directories to copy (default: $DEFAULT_DIRS_TO_COPY)" - echo " -a, --all Copy all directories, including bin, _templ, tests" - echo "" - echo "Arguments:" - echo " project_name Name of the project (required)" - echo " directory Target directory (optional, defaults to current directory)" - echo "" - echo "Example:" - echo " stp init \"My Project\" ./my-project" - echo " stp init --dirs \"eng,llm,prj,usr,bin\" \"My Project\" ./my-project" - exit 1 -} - -# Initialize variables -DIRS_TO_COPY="$DEFAULT_DIRS_TO_COPY" -COPY_ALL=false - -# Parse options -while [[ $# -gt 0 ]]; do - case "$1" in - -d|--dirs) - DIRS_TO_COPY="$2" - shift 2 - ;; - -a|--all) - COPY_ALL=true - shift - ;; - -*) - error "Unknown option: $1" - usage - ;; - *) - # First non-option argument is project name - break - ;; - esac -done - -# Check for required arguments -if [ $# -lt 1 ]; then - error "Project name is required" - usage -fi - -# Get arguments -PROJECT_NAME="$1" -TARGET_DIR="${2:-.}" - -# If copy_all is true, override dirs_to_copy -if [ "$COPY_ALL" = true ]; then - DIRS_TO_COPY="eng,llm,prj,usr,bin,_templ,tests" -fi - -# Check if STP_HOME is set -if [ -z "$STP_HOME" ]; then - error "STP_HOME environment variable is not set" -fi - -# Create target directory if it doesn't exist -mkdir -p "$TARGET_DIR" - -# Check if target directory is empty if it already exists -if [ -d "$TARGET_DIR" ] && [ "$(ls -A "$TARGET_DIR")" ]; then - echo "Warning: Target directory is not empty. STP will add files to the existing directory." - echo "Press Enter to continue or Ctrl+C to cancel." - read -r -fi - -# Create directory structure -echo "Creating STP directory structure..." 
-mkdir -p "$TARGET_DIR/stp" - -# Create directories based on whitelist -IFS=',' read -ra DIRS <<< "$DIRS_TO_COPY" -for dir in "${DIRS[@]}"; do - case "$dir" in - eng) - mkdir -p "$TARGET_DIR/stp/eng/tpd" - ;; - llm) - mkdir -p "$TARGET_DIR/stp/llm" - ;; - prj) - mkdir -p "$TARGET_DIR/stp/prj/st" - ;; - usr) - mkdir -p "$TARGET_DIR/stp/usr" - ;; - _templ) - mkdir -p "$TARGET_DIR/stp/_templ" - ;; - bin) - mkdir -p "$TARGET_DIR/stp/bin" - ;; - tests) - mkdir -p "$TARGET_DIR/stp/tests" - ;; - *) - echo "Warning: Unknown directory '$dir' in whitelist" - ;; - esac -done - -# Get author information from git if available -if command -v git &> /dev/null && git config user.name &> /dev/null; then - AUTHOR="$(git config user.name)" -else - AUTHOR="${USER:-Unknown}" -fi - -# Get current date -DATE="$(date '+%Y-%m-%d')" - -# Create configuration file -echo "Creating configuration file..." -mkdir -p "$TARGET_DIR/stp/.config" -cat > "$TARGET_DIR/stp/.config/config" << EOF -# STP Project Configuration -PROJECT_NAME="$PROJECT_NAME" -AUTHOR="$AUTHOR" -CREATED_DATE="$DATE" -ST_PREFIX="ST" -EOF - -# Find the template directory -if [ -d "$STP_HOME/stp/_templ" ]; then - TEMPL_DIR="$STP_HOME/stp/_templ" -else - TEMPL_DIR="$STP_HOME/_templ" -fi - -# Copy templates only if in whitelist -if [[ "$DIRS_TO_COPY" == *"_templ"* ]]; then - echo "Copying templates..." - cp -r "$TEMPL_DIR"/* "$TARGET_DIR/stp/_templ/" -fi - -# Create initial documents -echo "Creating initial documents..." 
- -# Create prj documents if in whitelist -if [[ "$DIRS_TO_COPY" == *"prj"* ]]; then - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/prj/_wip.md" > "$TARGET_DIR/stp/prj/wip.md" - - # Create initial prj/st/steel_threads.md - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/prj/st/_steel_threads.md" > "$TARGET_DIR/stp/prj/st/steel_threads.md" -fi - -# Create eng documents if in whitelist -if [[ "$DIRS_TO_COPY" == *"eng"* ]]; then - # Create initial eng/tpd/technical_product_design.md - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/eng/tpd/_technical_product_design.md" > "$TARGET_DIR/stp/eng/tpd/technical_product_design.md" -fi - -# Create llm documents if in whitelist -if [[ "$DIRS_TO_COPY" == *"llm"* ]]; then - # Create initial llm/llm_preamble.md - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/llm/_llm_preamble.md" > "$TARGET_DIR/stp/llm/llm_preamble.md" -fi - -# Create usr documents if in whitelist -if [[ "$DIRS_TO_COPY" == *"usr"* ]]; then - # Create initial usr/ documents - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/usr/_user_guide.md" > "$TARGET_DIR/stp/usr/user_guide.md" - - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/usr/_reference_guide.md" > "$TARGET_DIR/stp/usr/reference_guide.md" - - sed -e "s/\[\[PROJECT_NAME\]\]/$PROJECT_NAME/g" \ - -e "s/\[\[AUTHOR\]\]/$AUTHOR/g" \ - -e "s/\[\[DATE\]\]/$DATE/g" \ - "$TEMPL_DIR/usr/_deployment_guide.md" > "$TARGET_DIR/stp/usr/deployment_guide.md" -fi - -# Copy scripts only if explicitly in whitelist -if [[ 
"$DIRS_TO_COPY" == *"bin"* ]]; then - echo "Copying scripts..." - if [ -d "$STP_HOME/stp/bin" ]; then - cp "$STP_HOME/stp/bin"/stp* "$TARGET_DIR/stp/bin/" - else - cp "$STP_HOME/bin"/stp* "$TARGET_DIR/stp/bin/" - fi - - # Make scripts executable - chmod +x "$TARGET_DIR"/stp/bin/stp* -fi - -# Create local configuration for STP -echo "Creating local configuration..." -cat > "$TARGET_DIR/stp/.config/stp_config.sh" << EOF -#!/bin/bash -# Local STP configuration for $PROJECT_NAME - -# Project settings -export STP_PROJECT="$PROJECT_NAME" -export STP_AUTHOR="$AUTHOR" -EOF - -echo "STP initialized for project: $PROJECT_NAME in $TARGET_DIR" -echo "Included directories: $DIRS_TO_COPY" -echo "" -echo "To get started:" -echo " 1. Review the initial documentation in the created directories" - -# Show appropriate messages based on which directories were created -if [[ "$DIRS_TO_COPY" == *"prj"* ]]; then - echo " 2. Create your first steel thread: stp st new \"Initial Setup\"" - echo " 3. Update stp/prj/wip.md with your current focus" -fi - -echo "" -echo "For help, run: stp help" \ No newline at end of file diff --git a/stp/bin/stp_st.bak b/stp/bin/stp_st.bak deleted file mode 100755 index 7606786..0000000 --- a/stp/bin/stp_st.bak +++ /dev/null @@ -1,866 +0,0 @@ -#!/bin/bash -# stp_st - Manage steel threads -# Usage: stp_st <command> [options] [arguments] - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to display usage information -usage() { - echo "Usage: stp st <command> [options] [arguments]" - echo "" - echo "Manage steel threads for the project" - echo "" - echo "Commands:" - echo " new <title> Create a new steel thread" - echo " done <id> Mark a steel thread as complete" - echo " list [--status <status>] [--width N] List all steel threads" - echo " sync [--write] [--width N] Synchronize steel_threads.md with individual ST files" - echo " organize [--write] Organize ST files in directories by 
status" - echo " show <id> Show details of a specific steel thread" - echo " edit <id> Open a steel thread in your default editor" - echo "" - echo "Examples:" - echo " stp st new \"Implement Feature X\"" - echo " stp st done ST0001" - echo " stp st list --status \"In Progress\" --width 100" - echo " stp st sync --write --width 100" - echo " stp st organize --write" - echo " stp st show ST0001" - echo " stp st edit 1" - exit 1 -} - -# Check for required arguments -if [ $# -lt 1 ]; then - error "Steel thread command is required" - usage -fi - -# Load project configuration if available -if [ -f stp/.config/config ]; then - source stp/.config/config -elif [ -f .stp-config ]; then - # For backward compatibility - source .stp-config -fi - -# Get command -ST_COMMAND="$1" -shift - -# Function to determine the appropriate path for a steel thread based on its status -get_st_path() { - local st_id="$1" - local status="$2" - local base_dir="stp/prj/st" - local test_env=0 - - # Check if we're in a test environment - either from TEST_TEMP_DIR or from being in a /tmp directory - if [[ "${TEST_TEMP_DIR:-}" != "" ]] || [[ "$(pwd)" == /tmp* ]] || [[ "$(pwd)" == */stp/tests/* ]]; then - test_env=1 - fi - - # If we're in a test environment, just use the main directory - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id.md" - return - fi - - # If status is not provided, try to find the steel thread and read its status - if [ -z "$status" ]; then - # Check all possible locations - local possible_locations=( - "$base_dir/$st_id.md" - "$base_dir/COMPLETED/$st_id.md" - "$base_dir/NOT-STARTED/$st_id.md" - "$base_dir/CANCELLED/$st_id.md" - ) - - for location in "${possible_locations[@]}"; do - if [ -f "$location" ]; then - # Check both YAML frontmatter and document body for status - yaml_status=$(grep -m 1 "^status:" "$location" | sed "s/^status: *//") - body_status=$(grep -m 1 "^\- \*\*Status\*\*:" "$location" | sed "s/^\- \*\*Status\*\*: //" | sed 
's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter status - if [ -n "$yaml_status" ]; then - status="$yaml_status" - elif [ -n "$body_status" ]; then - status="$body_status" - else - status="Not Started" - fi - - break - fi - done - fi - - # Return the appropriate directory based on status - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id.md" - else - case "$status" in - "Completed") - echo "$base_dir/COMPLETED/$st_id.md" - ;; - "Not Started") - echo "$base_dir/NOT-STARTED/$st_id.md" - ;; - "Cancelled") - echo "$base_dir/CANCELLED/$st_id.md" - ;; - *) - # In Progress or On Hold stay in the main directory - echo "$base_dir/$st_id.md" - ;; - esac - fi -} - -# Function to get the next steel thread ID -get_next_steel_thread_id() { - local st_prefix="${ST_PREFIX:-ST}" - local base_dir="stp/prj/st" - local next_id=1 - local max_id=0 - - # Find all ST files in all directories - for file in $(find "$base_dir" -type f -name "$st_prefix[0-9][0-9][0-9][0-9].md" 2>/dev/null); do - # Extract numeric part of filename - local id_str=$(basename "$file" .md) - id_str=${id_str#$st_prefix} - - # Convert to number and compare - if [[ "$id_str" =~ ^[0-9]+$ ]]; then - local id=$((10#$id_str)) - if [ $id -gt $max_id ]; then - max_id=$id - fi - fi - done - - # Increment for the next ID - next_id=$((max_id + 1)) - - # Format with leading zeros (4 digits) - printf "%s%04d" "$st_prefix" $next_id -} - -# Function to update steel threads index -update_steel_threads_index() { - local id="$1" - local title="$2" - local status="$3" - local created="$4" - local completed="$5" - local index_file="stp/prj/st/steel_threads.md" - - # Create index file if it doesn't exist - if [ ! -f "$index_file" ]; then - mkdir -p "$(dirname "$index_file")" - cat > "$index_file" << EOF -# Steel Threads - -This document serves as an index of all steel threads in the project. 
- -## Index - -| ID | Title | Status | Created | Completed | -| ----------------------- | -------------------- | ------------ | ---------- | ---------- | -EOF - fi - - # Check if entry already exists - if grep -q "^| $id " "$index_file"; then - # Update existing entry - sed -i.bak "s/^| $id .*$/| $id | $title | $status | $created | $completed |/" "$index_file" - rm -f "$index_file.bak" - else - # Add new entry - echo "| $id | $title | $status | $created | $completed |" >> "$index_file" - fi -} - -# Normalize the command (handle alternative spelling) -if [ "$ST_COMMAND" = "organise" ]; then - ST_COMMAND="organize" -fi - -# Handle different commands -case "$ST_COMMAND" in - "new") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread title is required" - usage - fi - - TITLE="$1" - ST_ID=$(get_next_steel_thread_id) - ST_STATUS="Not Started" - ST_FILE=$(get_st_path "$ST_ID" "$ST_STATUS") - DATE=$(date '+%Y-%m-%d') - AUTHOR="${STP_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" - - # Create directory if it doesn't exist - mkdir -p "$(dirname "$ST_FILE")" - - # Create steel thread file from template - if [ -f "stp/_templ/prj/st/_ST####.md" ]; then - # Format date in both formats - YYYY-MM-DD for display and YYYYMMDD for frontmatter - DATE_COMPACT=$(date '+%Y%m%d') - - sed -e "s/ST####/$ST_ID/g" \ - -e "s/\[Title\]/$TITLE/g" \ - -e "s/\[Not Started|In Progress|Completed|On Hold|Cancelled\]/$ST_STATUS/g" \ - -e "s/YYYY-MM-DD/$DATE/g" \ - -e "s/YYYYMMDD/$DATE_COMPACT/g" \ - -e "s/\[Author Name\]/$AUTHOR/g" \ - "stp/_templ/prj/st/_ST####.md" > "$ST_FILE" - else - # Create file without template - DATE_COMPACT=$(date '+%Y%m%d') - cat > "$ST_FILE" << EOF ---- -verblock: "$(date '+%d %b %Y'):v0.1: $AUTHOR - Initial version" -stp_version: 1.0.0 -status: $ST_STATUS -created: $DATE_COMPACT -completed: ---- -# $ST_ID: $TITLE - -- **Status**: $ST_STATUS -- **Created**: $DATE -- **Completed**: -- **Author**: $AUTHOR - -## Objective 
-[Clear statement of what this steel thread aims to accomplish] - -## Context -[Background information and context for this steel thread] - -## Approach -[Planned approach for implementing this steel thread] - -## Tasks -- [ ] Task 1 -- [ ] Task 2 -- [ ] Task 3 - -## Implementation Notes -[Notes on implementation details, decisions, challenges, etc.] - -## Results -[Summary of results after completion] - -## Related Steel Threads -- [List any related steel threads here] -EOF - fi - - # Update index - update_steel_threads_index "$ST_ID" "$TITLE" "$ST_STATUS" "$DATE" "" - - echo "Created steel thread: $ST_ID: $TITLE" - echo "Edit file: $ST_FILE" - ;; - - "done") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - # If the ID doesn't start with ST, prepend it - elif [[ ! "$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - # Find the steel thread file (could be in any subdirectory) - ST_FILE=$(get_st_path "$ST_ID") - DATE=$(date '+%Y-%m-%d') - - # Check if steel thread exists - if [ ! 
-f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - # Extract title - TITLE=$(grep "^# $ST_ID:" "$ST_FILE" | sed "s/^# $ST_ID: //") - - # Update status and completion date in the file - sed -i.bak "s/^\- \*\*Status\*\*: .*$/- **Status**: Completed/" "$ST_FILE" - sed -i.bak "s/^\- \*\*Completed\*\*: .*$/- **Completed**: $DATE/" "$ST_FILE" - - # Update status in YAML frontmatter - sed -i.bak "s/^status: .*$/status: Completed/" "$ST_FILE" - sed -i.bak "s/^completed: .*$/completed: $(date '+%Y%m%d')/" "$ST_FILE" - - rm -f "$ST_FILE.bak" - - # Get the target location for this file - NEW_ST_FILE=$(get_st_path "$ST_ID" "Completed") - - # Move the file to the appropriate directory if it's different - if [ "$ST_FILE" != "$NEW_ST_FILE" ]; then - mkdir -p "$(dirname "$NEW_ST_FILE")" - mv "$ST_FILE" "$NEW_ST_FILE" - echo "Moved steel thread to: $NEW_ST_FILE" - fi - - # Update index - update_steel_threads_index "$ST_ID" "$TITLE" "Completed" "$(grep '^\- \*\*Created\*\*:' "$NEW_ST_FILE" | sed 's/^\- \*\*Created\*\*: //')" "$DATE" - - echo "Marked steel thread as complete: $ST_ID: $TITLE" - ;; - - "list") - # Parse options - STATUS="" - WIDTH=0 # Default to terminal width - while [ $# -gt 0 ]; do - case "$1" in - --status) - shift - STATUS="$1" - shift - ;; - --width) - shift - WIDTH="$1" - shift - ;; - *) - error "Unknown option: $1" - ;; - esac - done - - ST_DIR="stp/prj/st" - - # Check if ST directory exists - if [ ! 
-d "$ST_DIR" ]; then - error "Steel threads directory not found" - fi - - # Determine table width - if [ "$WIDTH" -gt 0 ]; then - # Use specified width - TABLE_WIDTH=$WIDTH - else - # For sync command with --write flag, always use 80 columns - # This ensures consistent formatting in the committed file - if [ "$ST_COMMAND" = "sync" ] && [ $WRITE_MODE -eq 1 ]; then - TABLE_WIDTH=80 - else - # Use terminal width - try multiple methods - # First check COLUMNS env var - if [ -n "$COLUMNS" ]; then - TABLE_WIDTH=$COLUMNS - elif [ -t 1 ]; then - # Try stty - STTY_SIZE=$( (stty size 2>/dev/null || echo "24 80") | cut -d' ' -f2) - if [ -n "$STTY_SIZE" ] && [ "$STTY_SIZE" -gt 0 ]; then - TABLE_WIDTH=$STTY_SIZE - else - # Try tput as fallback - TPUT_COLS=$(tput cols 2>/dev/null || echo 80) - TABLE_WIDTH=$TPUT_COLS - fi - else - # Not connected to a terminal, use default - TABLE_WIDTH=80 - fi - fi - # Debug output - comment out for production use - # echo "DEBUG: Terminal width detected: $TABLE_WIDTH" >&2 - fi - - # Dynamically allocate column widths based on terminal width - # Minimum widths for each column - MIN_ID_WIDTH=10 - MIN_STATUS_WIDTH=12 - MIN_DATE_WIDTH=10 - MIN_TITLE_WIDTH=20 - - # Fixed column sizes (ID, Status, Created, Completed) - FIXED_MIN_WIDTH=$((MIN_ID_WIDTH + MIN_STATUS_WIDTH + MIN_DATE_WIDTH + MIN_DATE_WIDTH)) - - # Account for separators and padding - SEPARATORS_WIDTH=13 # 4 separators at "| " = 8 chars + 5 spaces in header - - # Calculate available width - AVAILABLE_WIDTH=$((TABLE_WIDTH - SEPARATORS_WIDTH)) - - # If terminal is extremely narrow, use minimum values - if [ $AVAILABLE_WIDTH -lt $((FIXED_MIN_WIDTH + MIN_TITLE_WIDTH)) ]; then - ID_WIDTH=$MIN_ID_WIDTH - STATUS_WIDTH=$MIN_STATUS_WIDTH - DATE_WIDTH=$MIN_DATE_WIDTH - TITLE_WIDTH=$MIN_TITLE_WIDTH - else - # Base width allocation - more balanced distribution - if [ $AVAILABLE_WIDTH -le 100 ]; then - # For narrower terminals (~80 columns) - ID_WIDTH=$MIN_ID_WIDTH - STATUS_WIDTH=$MIN_STATUS_WIDTH - 
DATE_WIDTH=$MIN_DATE_WIDTH - - # Give remaining space to title, but cap it at 50% of available width - MAX_TITLE_PCT=50 - MAX_TITLE_WIDTH=$((AVAILABLE_WIDTH * MAX_TITLE_PCT / 100)) - TITLE_WIDTH=$((AVAILABLE_WIDTH - MIN_ID_WIDTH - MIN_STATUS_WIDTH - MIN_DATE_WIDTH - MIN_DATE_WIDTH)) - - # Cap title width - [ $TITLE_WIDTH -gt $MAX_TITLE_WIDTH ] && TITLE_WIDTH=$MAX_TITLE_WIDTH - - # If we capped title, redistribute the space - REMAINING=$((AVAILABLE_WIDTH - ID_WIDTH - STATUS_WIDTH - DATE_WIDTH - DATE_WIDTH - TITLE_WIDTH)) - if [ $REMAINING -gt 0 ]; then - # Distribute remaining space to other columns - STATUS_WIDTH=$((STATUS_WIDTH + (REMAINING * 4 / 10))) - DATE_WIDTH=$((DATE_WIDTH + (REMAINING * 3 / 10))) - ID_WIDTH=$((ID_WIDTH + (REMAINING * 3 / 10))) - fi - else - # For wider terminals: - # Start with percentage-based allocation of available width - ID_WIDTH=$((AVAILABLE_WIDTH * 10 / 100)) # 10% - STATUS_WIDTH=$((AVAILABLE_WIDTH * 15 / 100)) # 15% - DATE_WIDTH=$((AVAILABLE_WIDTH * 10 / 100)) # 10% each (20% total for dates) - TITLE_WIDTH=$((AVAILABLE_WIDTH * 55 / 100)) # 55% - - # Ensure minimum widths - [ $ID_WIDTH -lt $MIN_ID_WIDTH ] && ID_WIDTH=$MIN_ID_WIDTH - [ $STATUS_WIDTH -lt $MIN_STATUS_WIDTH ] && STATUS_WIDTH=$MIN_STATUS_WIDTH - [ $DATE_WIDTH -lt $MIN_DATE_WIDTH ] && DATE_WIDTH=$MIN_DATE_WIDTH - [ $TITLE_WIDTH -lt $MIN_TITLE_WIDTH ] && TITLE_WIDTH=$MIN_TITLE_WIDTH - - # Make any final adjustments - TOTAL=$((ID_WIDTH + STATUS_WIDTH + DATE_WIDTH + DATE_WIDTH + TITLE_WIDTH)) - if [ $TOTAL -gt $AVAILABLE_WIDTH ]; then - # We allocated too much, reduce title width to fit - TITLE_WIDTH=$((TITLE_WIDTH - (TOTAL - AVAILABLE_WIDTH))) - fi - fi - fi - - # Function to truncate string with ellipsis if too long - truncate_string() { - local string="$1" - local width=$2 - - # Handle empty strings - if [ -z "$string" ]; then - echo "" - return - fi - - # If string length exceeds width, truncate and add ellipsis - if [ ${#string} -gt $width ]; then - # For very narrow 
columns, ensure we at least show something - if [ $width -le 5 ]; then - # Just use available width - echo "${string:0:$width}" - else - # Standard truncation with ellipsis - echo "${string:0:$((width-3))}..." - fi - else - # String fits within width, return as is - echo "$string" - fi - } - - # Display steel threads in a formatted table - printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ - "ID" "Title" "Status" "Created" "Completed" - printf "%-${ID_WIDTH}s-|-%-${TITLE_WIDTH}s-|-%-${STATUS_WIDTH}s-|-%-${DATE_WIDTH}s-|-%-${DATE_WIDTH}s\n" \ - "$(printf '%0.s-' $(seq 1 $ID_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $TITLE_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $STATUS_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $DATE_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $DATE_WIDTH))" - - # Collect data from individual ST files - declare -a st_data - - # Loop through all ST####.md files in all subdirectories - for file in $(find "$ST_DIR" -type f -name "ST[0-9][0-9][0-9][0-9].md"); do - if [ -f "$file" ]; then - # Extract ID from filename - ID=$(basename "$file" .md) - - # Check both YAML frontmatter and document body for metadata - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter for status - if [ -n "$YAML_STATUS" ]; then - ST_STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - ST_STATUS="$BODY_STATUS" - else - ST_STATUS="Not Started" - fi - - # Extract title from the first line - TITLE=$(grep "^# $ID:" "$file" | sed "s/^# $ID: //") - - # Extract created and completed dates - CREATED=$(grep -m 1 "^\- \*\*Created\*\*:" "$file" | sed "s/^\- \*\*Created\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - COMPLETED=$(grep -m 1 "^\- \*\*Completed\*\*:" "$file" | sed "s/^\- \*\*Completed\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - 
# If created date is empty or placeholder, try YAML frontmatter - if [ -z "$CREATED" ] || [ "$CREATED" = "YYYY-MM-DD" ]; then - YAML_CREATED=$(grep -m 1 "^created:" "$file" | sed "s/^created: *//") - if [ -n "$YAML_CREATED" ] && [ "$YAML_CREATED" != "YYYYMMDD" ]; then - # Convert YYYYMMDD to YYYY-MM-DD if needed - if [[ "$YAML_CREATED" =~ ^[0-9]{8}$ ]]; then - CREATED="${YAML_CREATED:0:4}-${YAML_CREATED:4:2}-${YAML_CREATED:6:2}" - else - CREATED="$YAML_CREATED" - fi - else - CREATED=$(date '+%Y-%m-%d') - fi - fi - - # If completed date is empty or placeholder, try YAML frontmatter - if [ -z "$COMPLETED" ] || [ "$COMPLETED" = "YYYY-MM-DD" ]; then - YAML_COMPLETED=$(grep -m 1 "^completed:" "$file" | sed "s/^completed: *//") - if [ -n "$YAML_COMPLETED" ] && [ "$YAML_COMPLETED" != "null" ] && [ "$YAML_COMPLETED" != "~" ] && [ "$YAML_COMPLETED" != "YYYYMMDD" ]; then - # Convert YYYYMMDD to YYYY-MM-DD if needed - if [[ "$YAML_COMPLETED" =~ ^[0-9]{8}$ ]]; then - COMPLETED="${YAML_COMPLETED:0:4}-${YAML_COMPLETED:4:2}-${YAML_COMPLETED:6:2}" - else - COMPLETED="$YAML_COMPLETED" - fi - fi - fi - - # Skip if the requested status doesn't match - if [ -n "$STATUS" ] && [ "$STATUS" != "$ST_STATUS" ]; then - continue - fi - - # Add to data array - st_data+=("$ID|$TITLE|$ST_STATUS|$CREATED|$COMPLETED") - fi - done - - # Sort by ID in reverse order (newest first) - IFS=$'\n' sorted_data=($(sort -r <<<"${st_data[*]}")) - unset IFS - - # Process and display rows - for line in "${sorted_data[@]}"; do - ID=$(echo "$line" | cut -d'|' -f1) - TITLE=$(echo "$line" | cut -d'|' -f2) - ST_STATUS=$(echo "$line" | cut -d'|' -f3) - CREATED=$(echo "$line" | cut -d'|' -f4) - COMPLETED=$(echo "$line" | cut -d'|' -f5) - - # Truncate values if needed - ID_TRUNC=$(truncate_string "$ID" $ID_WIDTH) - TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) - STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) - CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) - COMPLETED_TRUNC=$(truncate_string 
"$COMPLETED" $DATE_WIDTH) - - printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ - "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" - done - ;; - - "show") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - # If the ID doesn't start with ST, prepend it - elif [[ ! "$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - # Find the steel thread file (could be in any subdirectory) - ST_FILE=$(get_st_path "$ST_ID") - - # Check if steel thread exists - if [ ! -f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - # Display steel thread contents - cat "$ST_FILE" - ;; - - "edit") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - # If the ID doesn't start with ST, prepend it - elif [[ ! "$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - # Find the steel thread file (could be in any subdirectory) - ST_FILE=$(get_st_path "$ST_ID") - - # Check if steel thread exists - if [ ! 
-f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - # Get absolute path to the file - ABSOLUTE_PATH=$(cd "$(dirname "$ST_FILE")" && pwd)/$(basename "$ST_FILE") - - # Use the appropriate open command based on the OS - if [[ "$OSTYPE" == "darwin"* ]]; then - # macOS - open "$ABSOLUTE_PATH" - elif [[ "$OSTYPE" == "linux-gnu"* ]]; then - # Linux - if command -v xdg-open > /dev/null; then - xdg-open "$ABSOLUTE_PATH" - else - # Fallback to default editor - ${EDITOR:-vi} "$ABSOLUTE_PATH" - fi - elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "cygwin" || "$OSTYPE" == "win32" ]]; then - # Windows - start "$ABSOLUTE_PATH" - else - # Fallback to default editor - ${EDITOR:-vi} "$ABSOLUTE_PATH" - fi - - echo "Opening steel thread: $ST_ID" - ;; - - "sync") - # Parse options - WRITE_MODE=0 - WIDTH=80 - - while [ $# -gt 0 ]; do - case "$1" in - --write) WRITE_MODE=1; shift ;; - --width) shift; WIDTH="$1"; shift ;; - *) error "Unknown option: $1" ;; - esac - done - - # Paths - ST_DIR="stp/prj/st" - INDEX_FILE="$ST_DIR/steel_threads.md" - - # Basic validation - [ ! -d "$ST_DIR" ] && error "Steel threads directory not found" - [ ! 
-f "$INDEX_FILE" ] && error "Steel threads index file not found" - - if [ $WRITE_MODE -eq 1 ]; then - # Create temp files - TMP_FILE=$(mktemp) - LIST_OUTPUT=$(mktemp) - - # Get list output silently - "$0" list --width $WIDTH > "$LIST_OUTPUT" 2>/dev/null - - # Extract everything before the markers - sed -n '1,/<!-- BEGIN: STEEL_THREAD_INDEX -->/p' "$INDEX_FILE" > "$TMP_FILE" - - # Add the list output - cat "$LIST_OUTPUT" >> "$TMP_FILE" - - # Add everything after the markers - sed -n '/<!-- END: STEEL_THREAD_INDEX -->/,$p' "$INDEX_FILE" >> "$TMP_FILE" - - # Update file and clean up - mv "$TMP_FILE" "$INDEX_FILE" - rm "$LIST_OUTPUT" - - echo "Updated steel threads index file: $INDEX_FILE" - else - # For display, just run the list command - "$0" list --width "$WIDTH" - fi - ;; - - "organize") - # Parse options - WRITE_MODE=0 - while [ $# -gt 0 ]; do - case "$1" in - --write) - WRITE_MODE=1 - shift - ;; - *) - error "Unknown option: $1" - ;; - esac - done - - BASE_DIR="stp/prj/st" - - # Create required directories if they don't exist - mkdir -p "$BASE_DIR/COMPLETED" "$BASE_DIR/NOT-STARTED" "$BASE_DIR/CANCELLED" - - # Find all steel thread files in the root directory - ST_FILES=$(find "$BASE_DIR" -maxdepth 1 -name "ST[0-9][0-9][0-9][0-9].md") - - # Process each steel thread file - for file in $ST_FILES; do - # Skip if not a file - if [ ! 
-f "$file" ]; then - continue - fi - - # Extract ID from filename - ID=$(basename "$file" .md) - - # Check both YAML frontmatter and document body for status - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter for status - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo "Processing file: $file" - echo " File: $ID - Status: $STATUS" - - # Get the target location for this file - TARGET_FILE=$(get_st_path "$ID" "$STATUS") - - # If we're in write mode and the target location is different, move the file - if [ $WRITE_MODE -eq 1 ] && [ "$file" != "$TARGET_FILE" ]; then - # Create target directory if it doesn't exist (should already exist, but just in case) - mkdir -p "$(dirname "$TARGET_FILE")" - - # Move the file - mv "$file" "$TARGET_FILE" - echo "Moving $ID to $(dirname "$TARGET_FILE")" - else - # Just display what would be done - if [ "$file" != "$TARGET_FILE" ]; then - echo "Would move $ID to $(dirname "$TARGET_FILE")" - else - echo "$ID stays in main directory" - fi - fi - done - - # Also check subdirectories to make sure files are in the right place - for subdir in "$BASE_DIR"/*; do - if [ -d "$subdir" ] && [[ "$subdir" != "$BASE_DIR/steel_threads.md" ]]; then - SUBDIR_NAME=$(basename "$subdir") - - echo "Processing subdirectory: $subdir" - - # Find all steel thread files in this subdirectory - SUB_ST_FILES=$(find "$subdir" -maxdepth 1 -name "ST[0-9][0-9][0-9][0-9].md") - - for file in $SUB_ST_FILES; do - # Skip if not a file - if [ ! 
-f "$file" ]; then - continue - fi - - # Extract ID from filename - ID=$(basename "$file" .md) - - echo " Processing file in subdirectory: $file" - - # Check both YAML frontmatter and document body for status - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter for status - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo " File: $ID - Status: $STATUS" - - # Get the target location for this file - TARGET_FILE=$(get_st_path "$ID" "$STATUS") - - # If we're in write mode and the target location is different, move the file - if [ $WRITE_MODE -eq 1 ] && [ "$file" != "$TARGET_FILE" ]; then - # Create target directory if it doesn't exist (should already exist, but just in case) - mkdir -p "$(dirname "$TARGET_FILE")" - - # Move the file - mv "$file" "$TARGET_FILE" - echo "Moving $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_FILE")")" - else - # Just display what would be done - if [ "$file" != "$TARGET_FILE" ]; then - echo "Would move $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_FILE")")" - fi - fi - done - fi - done - - # If we're in write mode, update the index file after organizing - if [ $WRITE_MODE -eq 1 ]; then - "$0" sync --write - echo "Updated steel threads index." 
- fi - ;; - - "help") - usage - ;; - - *) - error "Unknown command: $ST_COMMAND" - usage - ;; -esac \ No newline at end of file diff --git a/stp/bin/stp_upgrade b/stp/bin/stp_upgrade deleted file mode 100755 index edae931..0000000 --- a/stp/bin/stp_upgrade +++ /dev/null @@ -1,420 +0,0 @@ -#!/bin/bash -# stp_upgrade - Upgrade STP files to the latest format -# Usage: stp upgrade [--force] - -# Exit on error -set -e - -# Current STP version -CURRENT_VERSION="1.2.1" - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to display usage information -usage() { - cat << EOF -Usage: stp upgrade [--force] [--organize] - -Upgrade STP files to the latest format. - -This command scans all STP files and brings them up to date with the latest version. -It adds or updates metadata and ensures all files follow the current format standards. - -Options: - --force Force upgrade even for major version differences - --organize Organize steel thread files into status subdirectories after upgrade - -Examples: - stp upgrade - stp upgrade --force - stp upgrade --organize - stp upgrade --force --organize -EOF - exit 1 -} - -# Parse options -FORCE=0 -ORGANIZE=0 -while [ "$#" -gt 0 ]; do - case "$1" in - --force) - FORCE=1 - shift - ;; - --organize) - ORGANIZE=1 - shift - ;; - --help) - usage - ;; - *) - error "Unknown option: $1" - ;; - esac -done - -# Load project configuration if available -if [ -f stp/.config/config ]; then - source stp/.config/config -elif [ -f .stp-config ]; then - # For backward compatibility - source .stp-config -fi - -echo "Starting STP upgrade process..." -echo "Current STP version: $CURRENT_VERSION" -echo "" - -# Function to compare versions -# Returns: 0 if equal, 1 if version1 > version2, 2 if version1 < version2 -compare_versions() { - local version1="$1" - local version2="$2" - - if [ "$version1" = "$version2" ]; then - return 0 - fi - - local IFS=. 
- local v1=($version1) - local v2=($version2) - - # Major version comparison - if [ "${v1[0]:-0}" -gt "${v2[0]:-0}" ]; then - return 1 - elif [ "${v1[0]:-0}" -lt "${v2[0]:-0}" ]; then - return 2 - fi - - # Minor version comparison - if [ "${v1[1]:-0}" -gt "${v2[1]:-0}" ]; then - return 1 - elif [ "${v1[1]:-0}" -lt "${v2[1]:-0}" ]; then - return 2 - fi - - # Patch version comparison - if [ "${v1[2]:-0}" -gt "${v2[2]:-0}" ]; then - return 1 - elif [ "${v1[2]:-0}" -lt "${v2[2]:-0}" ]; then - return 2 - fi - - # Equal - return 0 -} - -# Function to extract version from a file -get_file_version() { - local file="$1" - local version="" - - if [ -f "$file" ]; then - # Try to extract stp_version from YAML frontmatter - version=$(grep -m 1 "^stp_version:" "$file" | sed "s/^stp_version: *//") - fi - - # If no version found, assume "0.0.0" (pre-versioning) - if [ -z "$version" ]; then - version="0.0.0" - fi - - echo "$version" -} - -# Function to add/update YAML frontmatter in a file -update_file_frontmatter() { - local file="$1" - local temp_file="${file}.tmp" - - if [ -f "$file" ]; then - # Check if file already has YAML frontmatter - if grep -q "^---" "$file"; then - # Extract information from the file content for comprehensive frontmatter update - local title=$(grep -m 1 "^# " "$file" | sed "s/^# //") - local author=$(grep -m 1 "^\- \*\*Author\*\*:" "$file" | sed "s/^\- \*\*Author\*\*: //") - local status=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //") - local created=$(grep -m 1 "^\- \*\*Created\*\*:" "$file" | sed "s/^\- \*\*Created\*\*: //") - local completed=$(grep -m 1 "^\- \*\*Completed\*\*:" "$file" | sed "s/^\- \*\*Completed\*\*: //") - - # Set default values if not found in the file - if [ -z "$status" ]; then - status="Not Started" - fi - - if [ -z "$created" ]; then - created=$(date '+%Y%m%d') - elif [[ "$created" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then - # Convert YYYY-MM-DD to YYYYMMDD if needed - created=$(echo "$created" | tr -d 
'-') - fi - - if [ -n "$completed" ] && [ "$completed" != "YYYY-MM-DD" ] && [ ! -z "$(echo "$completed" | tr -d '[:space:]')" ]; then - if [[ "$completed" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then - # Convert YYYY-MM-DD to YYYYMMDD if needed - completed=$(echo "$completed" | tr -d '-') - fi - else - completed="" - fi - - if [ -z "$author" ]; then - author="${STP_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" - fi - - # Create a new complete frontmatter with all the necessary fields - echo "---" > "$temp_file" - echo "verblock: \"$(date '+%d %b %Y'):v0.1: $author - Updated via STP upgrade\"" >> "$temp_file" - echo "stp_version: $CURRENT_VERSION" >> "$temp_file" - echo "status: $status" >> "$temp_file" - echo "created: $created" >> "$temp_file" - echo "completed: $completed" >> "$temp_file" - echo "---" >> "$temp_file" - - # Add the rest of the file content, skipping the old frontmatter - awk ' - BEGIN { in_frontmatter = 0; skip = 0; } - /^---/ { - if (in_frontmatter == 0) { - in_frontmatter = 1; - skip = 1; - next; - } else { - in_frontmatter = 0; - skip = 0; - next; - } - } - !skip { print; } - ' "$file" >> "$temp_file" - else - # Extract information from the file content to create proper frontmatter - local title=$(grep -m 1 "^# " "$file" | sed "s/^# //") - local author=$(grep -m 1 "^\- \*\*Author\*\*:" "$file" | sed "s/^\- \*\*Author\*\*: //") - local status=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //") - local created=$(grep -m 1 "^\- \*\*Created\*\*:" "$file" | sed "s/^\- \*\*Created\*\*: //") - local completed=$(grep -m 1 "^\- \*\*Completed\*\*:" "$file" | sed "s/^\- \*\*Completed\*\*: //") - - # Set default values if not found in the file - if [ -z "$status" ]; then - status="Not Started" - fi - - if [ -z "$created" ]; then - created=$(date '+%Y%m%d') - elif [[ "$created" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then - # Convert YYYY-MM-DD to YYYYMMDD if needed - created=$(echo "$created" | tr -d '-') - fi - - 
if [ -n "$completed" ] && [ "$completed" != "YYYY-MM-DD" ] && [ ! -z "$(echo "$completed" | tr -d '[:space:]')" ]; then - if [[ "$completed" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then - # Convert YYYY-MM-DD to YYYYMMDD if needed - completed=$(echo "$completed" | tr -d '-') - fi - else - completed="" - fi - - if [ -z "$author" ]; then - author="${STP_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" - fi - - # Create a new complete frontmatter with all the necessary fields - echo "---" > "$temp_file" - echo "verblock: \"$(date '+%d %b %Y'):v0.1: $author - Updated via STP upgrade\"" >> "$temp_file" - echo "stp_version: $CURRENT_VERSION" >> "$temp_file" - echo "status: $status" >> "$temp_file" - echo "created: $created" >> "$temp_file" - echo "completed: $completed" >> "$temp_file" - echo "---" >> "$temp_file" - - # Add the original file content - cat "$file" >> "$temp_file" - fi - - # Replace the original file - mv "$temp_file" "$file" - echo "Updated: $file" - fi -} - -# Function to upgrade section markers in steel_threads.md -upgrade_section_markers() { - local file="stp/prj/st/steel_threads.md" - local temp_file=$(mktemp) - - if [ -f "$file" ]; then - # Check if file already has section markers - if grep -q "BEGIN: STEEL_THREAD_INDEX" "$file"; then - echo "Section markers already present in $file" - else - # Add section markers - awk ' - /^## Index/ { - print; - print ""; - print "<!-- BEGIN: STEEL_THREAD_INDEX -->"; - in_index = 1; - next; - } - /^##/ && in_index { - print "<!-- END: STEEL_THREAD_INDEX -->"; - print ""; - in_index = 0; - print; - next; - } - { print; } - END { - if (in_index) { - print "<!-- END: STEEL_THREAD_INDEX -->"; - } - } - ' "$file" > "$temp_file" - - # Replace the original file - mv "$temp_file" "$file" - echo "Added section markers to $file" - fi - fi -} - -# Scan directories for STP files to upgrade -echo "Scanning for STP files to upgrade..." 
- -# Check for old usage_rules.md file and rename to usage-rules.md -if [ -f "stp/eng/usage_rules.md" ]; then - echo "Found old usage_rules.md file, renaming to usage-rules.md..." - mv "stp/eng/usage_rules.md" "stp/eng/usage-rules.md" - echo "Renamed: stp/eng/usage_rules.md -> stp/eng/usage-rules.md" -fi - -# Check for steel threads directory -if [ -d "stp/prj/st" ]; then - # Check if we need to migrate from files to directories (1.2.0 -> 1.2.1) - NEEDS_MIGRATION=0 - if ls stp/prj/st/ST*.md 1> /dev/null 2>&1 || ls stp/prj/st/*/ST*.md 1> /dev/null 2>&1; then - # Check if any .md files exist (old structure) - for st_file in $(find stp/prj/st -name "ST[0-9][0-9][0-9][0-9].md" -type f 2>/dev/null); do - NEEDS_MIGRATION=1 - break - done - fi - - if [ $NEEDS_MIGRATION -eq 1 ]; then - echo "" - echo "Detected steel thread files in old format (single files)." - echo "Migration to directory structure is required for v1.2.1." - echo "" - read -p "Migrate steel threads to directory structure? (Y/n) " -n 1 -r - echo "" - if [[ $REPLY =~ ^[Yy]$ ]] || [[ -z $REPLY ]]; then - # Run migration script - if [ -x "./stp/bin/migrate_st_to_dirs" ]; then - ./stp/bin/migrate_st_to_dirs - elif [ -n "$STP_HOME" ] && [ -x "$STP_HOME/stp/bin/migrate_st_to_dirs" ]; then - "$STP_HOME/stp/bin/migrate_st_to_dirs" - else - error "Migration script not found: migrate_st_to_dirs" - fi - echo "" - else - echo "Migration skipped. Note: Some STP commands may not work correctly with the old format." - echo "" - fi - fi - - # Upgrade steel_threads.md - echo "Checking steel_threads.md..." - upgrade_section_markers - - # Process all steel thread directories (new structure) - echo "Upgrading steel thread files..." 
- if ls stp/prj/st/ST*.md 1> /dev/null 2>&1; then - for st_file in stp/prj/st/ST*.md; do - if [ -f "$st_file" ]; then - file_version=$(get_file_version "$st_file") - echo "Processing $st_file (current version: $file_version)" - - # Check if the file contains the necessary metadata fields - missing_metadata=0 - if ! grep -q "^\- \*\*Status\*\*:" "$st_file"; then - echo " Missing Status field in file" - missing_metadata=1 - fi - if ! grep -q "^\- \*\*Created\*\*:" "$st_file"; then - echo " Missing Created field in file" - missing_metadata=1 - fi - if ! grep -q "^\- \*\*Completed\*\*:" "$st_file"; then - echo " Missing Completed field in file" - missing_metadata=1 - fi - if ! grep -q "^\- \*\*Author\*\*:" "$st_file"; then - echo " Missing Author field in file" - missing_metadata=1 - fi - - # Force update if metadata is missing - if [ $missing_metadata -eq 1 ]; then - echo " Updating file to add missing metadata fields..." - update_file_frontmatter "$st_file" - continue - fi - - # Compare versions - compare_versions "$CURRENT_VERSION" "$file_version" - comparison=$? - - if [ $comparison -eq 0 ]; then - echo " Already at latest version, no update needed." - elif [ $comparison -eq 1 ]; then - # Current version is newer - major_current=$(echo $CURRENT_VERSION | cut -d. -f1) - major_file=$(echo $file_version | cut -d. -f1) - - if [ $major_current -gt $major_file ] && [ $FORCE -eq 0 ]; then - echo " Warning: File uses major version $major_file, current is $major_current." - echo " Use --force to upgrade this file." - else - update_file_frontmatter "$st_file" - fi - else - # File version is newer - echo " Warning: File version ($file_version) is newer than current version ($CURRENT_VERSION)." - echo " This may indicate the file was created with a newer version of STP." 
- fi - fi - done - else - echo "No ST*.md files found in stp/prj/st/ directory" - fi - - # Run the organization command to move files based on status (only if --organize flag is used) - if [ $ORGANIZE -eq 1 ]; then - if [ -x "./stp/bin/stp_st" ]; then - echo "" - echo "Organizing steel thread files by status..." - ./stp/bin/stp_st organize --write - elif [ -n "$STP_HOME" ] && [ -x "$STP_HOME/stp/bin/stp_st" ]; then - echo "" - echo "Organizing steel thread files by status..." - "$STP_HOME/stp/bin/stp_st" organize --write - fi - else - echo "" - echo "Note: Steel thread files were not reorganized. Use --organize flag to move files to status subdirectories." - fi -else - echo "No stp/prj/st directory found. Steel threads upgrade skipped." -fi - -echo "" -echo "STP upgrade complete." \ No newline at end of file diff --git a/stp/bin/update_frontmatter.sh b/stp/bin/update_frontmatter.sh deleted file mode 100755 index 156f46c..0000000 --- a/stp/bin/update_frontmatter.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# update_frontmatter.sh - Updates frontmatter in STP files -# This script ensures all STP files include the stp_version field in YAML frontmatter -# Updated for v1.2.1 directory structure - -# Exit on error -set -e - -# Current STP version -CURRENT_VERSION="1.2.1" - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to add/update YAML frontmatter in a file -update_file_frontmatter() { - local file="$1" - local temp_file="${file}.tmp" - - if [ -f "$file" ]; then - echo "Updating $file" - - # Check if file already has YAML frontmatter - if grep -q "^---" "$file"; then - # Update existing frontmatter - awk ' - BEGIN { in_frontmatter = 0; has_version = 0; printed_version = 0; } - /^---/ { - if (in_frontmatter == 0) { - in_frontmatter = 1; - print "---"; - next; - } else { - in_frontmatter = 0; - if (!has_version) { - print "stp_version: '"$CURRENT_VERSION"'"; - printed_version = 1; - } - print "---"; - next; - } - } - 
in_frontmatter && /^stp_version:/ { - print "stp_version: '"$CURRENT_VERSION"'"; - has_version = 1; - printed_version = 1; - next; - } - { print; } - ' "$file" > "$temp_file" - else - # Add new frontmatter - echo "---" > "$temp_file" - echo "stp_version: $CURRENT_VERSION" >> "$temp_file" - - # Try to extract author information from the file - local author=$(grep -m 1 "^\- \*\*Author\*\*:" "$file" | sed "s/^\- \*\*Author\*\*: //") - if [ -z "$author" ]; then - author="STP System" - fi - - # Add verblock if not present - echo "verblock: \"$(date '+%d %b %Y'):v0.1: $author - Added metadata\"" >> "$temp_file" - echo "---" >> "$temp_file" - cat "$file" >> "$temp_file" - fi - - # Replace the original file - mv "$temp_file" "$file" - fi -} - -# Scan for files to update -echo "Starting frontmatter update process..." -echo "Current STP version: $CURRENT_VERSION" -echo "" - -# Update files in stp/usr/ -echo "Updating user documentation..." -for file in stp/usr/*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi -done - -# Update files in stp/eng/ -echo "Updating engineering documentation..." -for file in stp/eng/tpd/*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi -done - -# Update files in stp/llm/ -echo "Updating LLM documentation..." -for file in stp/llm/*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi -done - -# Update files in stp/prj/ -echo "Updating project documentation..." -for file in stp/prj/*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi -done - -# Update steel threads (now in directories) -echo "Updating steel threads..." 
-# Check main directory -for dir in stp/prj/st/ST*/; do - if [ -d "$dir" ]; then - # Update all .md files in the directory - for file in "$dir"*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi - done - fi -done - -# Also check status subdirectories -for status_dir in stp/prj/st/COMPLETED/ stp/prj/st/NOT-STARTED/ stp/prj/st/CANCELLED/; do - if [ -d "$status_dir" ]; then - for dir in "$status_dir"ST*/; do - if [ -d "$dir" ]; then - for file in "$dir"*.md; do - if [ -f "$file" ]; then - update_file_frontmatter "$file" - fi - done - fi - done - fi -done - -# Update steel_threads.md separately -update_file_frontmatter "stp/prj/st/steel_threads.md" - -# Update steel threads index -if [ -x "./stp/bin/stp_st" ]; then - echo "" - echo "Running sync to update steel_threads.md..." - ./stp/bin/stp_st sync --write -fi - -echo "" -echo "Frontmatter update complete." \ No newline at end of file diff --git a/stp/doc/blog/0001-introduction-to-stp.md b/stp/doc/blog/0001-introduction-to-stp.md deleted file mode 100644 index 8a429e3..0000000 --- a/stp/doc/blog/0001-introduction-to-stp.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: "Introduction to STP" -date: "2025-07-08" -author: "Matthew Sinclair" -draft: false -word_count: 1623 ---- - -# Introduction to STP: A Better Way to Build Software - -If you've ever lost track of why a piece of code exists, struggled to onboard a new team member, or watched an LLM confidently solve the wrong problem, you understand the cost of lost intention in software development. In our [previous post](./0000-motivation-for-stp.md), we explored why capturing and preserving intention is crucial for modern development, especially when collaborating with AI. - -Today, I want to introduce you to the Steel Thread Process (STP) – a practical solution to the intention problem. STP isn't another heavyweight methodology or a complex framework. 
It's a lightweight system that enhances your existing workflow with intention-aware structure, making both human and AI collaboration more effective. - -## Building on the Intention Foundation - -We established that the fundamental challenge in modern development isn't just building software – it's ensuring that what we build aligns with why we're building it. This challenge intensifies when working with LLMs, which excel at pattern matching but lack understanding of underlying purpose. - -STP addresses this challenge by making intention explicit and structural. Rather than hoping developers will document the "why" or expecting LLMs to infer our goals, STP builds intention capture into the development workflow itself. - -The shift from theoretical understanding to practical implementation happens through three key innovations: -1. **Steel threads** that encapsulate both intent and implementation -2. **Structured templates** that prompt for intention at every stage -3. **Integration with task management** that maintains the intent-to-execution link - -## What is STP? - -The Steel Thread Process is a lightweight methodology for structuring both development and documentation around clearly captured intentions. At its heart, STP is surprisingly simple: shell scripts + markdown templates + task tracking = intention-aware development. 
- -Let me break this down: - -**Shell Scripts**: A collection of simple bash scripts that automate common tasks: -- `stp st new` - Create a new steel thread with intention-capturing template -- `stp st list` - View all steel threads and their status -- `stp bl` - Integrate with Backlog.md for task management -- `stp status` - Synchronise steel thread status with task completion - -**Markdown Templates**: Structured documents that prompt for intention: -- Steel thread templates that start with "why" before "what" -- Technical design documents with intention sections -- User guides that explain purpose alongside usage - -**Task Tracking**: Fine-grained visibility through Backlog.md integration: -- Each steel thread can have multiple associated tasks -- Tasks track the detailed work while threads maintain the big picture -- Automatic status updates based on task completion - -The magic happens when these simple components work together. A steel thread captures your intention, tasks track your implementation, and templates ensure nothing important gets lost along the way. - -Importantly, STP is designed to work alongside your existing practices. Whether you use Agile, Waterfall, or something in between, STP adds intention-awareness without disrupting your workflow. It's an enhancement, not a replacement. - -## Core Principles of STP - -STP is built on eight core principles that guide its design and implementation: - -### 1. Documentation as a First-Class Citizen -In STP, documentation isn't something you do after coding – it's an integral part of the development process. Every steel thread starts with documentation that captures intention, and this documentation evolves alongside the code. When documentation drives development, both humans and LLMs have the context they need to make good decisions. - -### 2. Intent Capture Throughout the Lifecycle -Intention isn't just captured at the beginning – it's maintained and referenced throughout development. 
From initial conception through implementation to future maintenance, the "why" remains visible and relevant. This creates a traceable lineage from business need to technical implementation. - -### 3. Incremental Development Through Steel Threads -Rather than tackling entire features or epics, STP encourages breaking work into steel threads – complete, minimal paths through your system. Each thread can be understood, implemented, and validated independently, making development more manageable and progress more visible. - -### 4. Task Tracking Linked to Steel Threads -While steel threads capture the big picture intention, individual tasks track the detailed work. STP's integration with Backlog.md creates a two-level system: strategic intent at the thread level, tactical execution at the task level. This separation keeps both perspectives clear and connected. - -### 5. Process-Agnostic Compatibility -STP doesn't dictate how you should develop software. Whether you're using Scrum, Kanban, or any other methodology, STP layers intention-awareness on top. It's designed to enhance, not replace, your existing workflow. - -### 6. Lightweight Enhancement -The entire STP system consists of simple shell scripts and markdown templates. No complex tools to learn, no vendor lock-in, no heavyweight processes. You can adopt STP incrementally, starting with a single steel thread and expanding as you see value. - -### 7. Flexibility to Match Your Workflow -Every team works differently. STP's templates and processes are starting points, not rigid requirements. Modify templates, adjust workflows, and make STP work for your specific needs while maintaining the core principle of intention capture. - -### 8. Integration with Modern LLM Tooling -STP was designed in the age of AI-assisted development. Its structured approach to intention and documentation makes it particularly effective when working with LLMs, providing the context and clarity these tools need to be truly helpful. 
- -## The Steel Thread Concept - -While we'll dive deep into steel threads in the next post, it's worth understanding the basic concept as it's central to how STP works. - -A steel thread is a complete, minimal path through your system that delivers value. Think of it as the thinnest possible slice that: -- Solves a real problem -- Can be implemented independently -- Provides learning about the system -- Captures clear intention - -Here's how the STP workflow typically looks: - -``` -┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ -│ Intention │ │ Steel Thread │ │ Tasks │ -│ Capture │────▶│ Creation │────▶│ Definition │ -│ │ │ (stp st new) │ │ (stp bl) │ -└─────────────────┘ └──────────────────┘ └─────────────────┘ - │ │ │ - │ ▼ ▼ - │ ┌──────────────────┐ ┌─────────────────┐ - │ │ Documentation │ │ Implementation │ - │ │ Templates │────▶│ (coding) │ - │ └──────────────────┘ └─────────────────┘ - │ │ │ - │ ▼ ▼ - │ ┌──────────────────┐ ┌─────────────────┐ - └─────────────▶│ Review & │◀────│ Testing │ - │ Validation │ │ │ - └──────────────────┘ └─────────────────┘ -``` - -This differs from traditional work organisation where tasks often lose connection to their original purpose. In STP, every task links back to a steel thread, and every steel thread explicitly captures intention. - -## Benefits of STP - -Adopting STP brings concrete benefits to your development process: - -### Better Alignment Between Intent and Implementation -When every piece of code traces back to a clearly stated intention, misalignment becomes obvious and correctable. Reviews shift from "Is this good code?" to "Does this serve our purpose?" – a much more valuable question. - -### Documentation That Stays Up-to-Date -Because documentation drives development rather than following it, it naturally stays current. The templates prompt for updates at each stage, and the documentation evolves alongside the implementation. No more archaeology to understand why code exists. 
- -### Fine-Grained Visibility Into Work Progress -The two-tier system of steel threads and tasks provides both strategic and tactical visibility. Stakeholders can track high-level progress through steel threads, while developers manage day-to-day work through linked tasks. Everyone gets the view they need. - -### Automatic Status Synchronization -As tasks complete, steel thread status updates automatically. This isn't just convenient – it ensures that high-level tracking reflects ground truth. No more status meetings to figure out where things really stand. - -### Improved Onboarding Experience -New team members can understand not just what the code does, but why it exists. Each steel thread tells a complete story from intention to implementation. This context dramatically reduces the time needed to become productive. - -### Enhanced Collaboration with LLMs -When working with AI assistants, the structured intention and context in STP documentation provides exactly what LLMs need to give relevant, aligned suggestions. Instead of guessing at your goals, they can reference explicit intentions. - -### More Efficient Development Process -While it might seem like additional overhead, STP actually streamlines development by: -- Reducing rework from misunderstood requirements -- Eliminating redundant status tracking -- Preventing scope creep through clear intentions -- Enabling parallel work through independent steel threads - -## What's Coming in This Blog Series - -This introduction has given you a high-level view of STP, but there's much more to explore. Here's what's coming in the rest of this series: - -**[The Steel Thread Methodology](./0002-the-steel-thread-methodology.md)**: A deep dive into steel threads – what they are, how to create them, and why they're more effective than traditional work organisation. 
- -**[Intent Capture in Software Development](./0003-intent-capture-in-software-development.md)**: Practical techniques for capturing, preserving, and leveraging intention throughout your development process. - -**[LLM Collaboration with STP](./0004-llm-collaboration-with-stp.md)**: How STP's structure makes AI assistance more effective, with real examples of improved LLM interactions. - -**[Getting Started with STP](./0005-getting-started-with-stp.md)**: A practical guide to implementing STP in your project, including installation, configuration, and your first steel thread. - -**[Next Steps and Future Work](./0006-next-steps-and-future-work.md)**: Where STP is heading and how you can contribute to its development. - -## Ready to Transform Your Development Process? - -STP offers a pragmatic solution to the intention problem in modern software development. By making intention explicit and structural, it bridges the gap between why we build and what we build, creating better outcomes for both human and AI collaboration. - -In the next post, we'll explore the steel thread methodology in detail, showing you exactly how to break down work in a way that preserves intention while enabling incremental progress. - -[Continue to: The Steel Thread Methodology →](./0002-the-steel-thread-methodology.md) diff --git a/stp/doc/blog/0004-llm-collaboration-with-stp.md b/stp/doc/blog/0004-llm-collaboration-with-stp.md deleted file mode 100644 index 1d61471..0000000 --- a/stp/doc/blog/0004-llm-collaboration-with-stp.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -title: "LLM Collaboration with STP" -date: "2025-07-08" -author: "Matthew Sinclair" -draft: false -word_count: 2274 ---- - -# LLM Collaboration with STP: Multiplying Development Capabilities - -We've built a foundation of [captured intention](./0003-intent-capture-in-software-development.md) using [steel threads](./0002-the-steel-thread-methodology.md). 
Now we explore how this foundation transforms collaboration with Large Language Models from hit-or-miss assistance into reliable development partnership. - -STP wasn't designed in isolation – it emerged from real-world experience working with LLMs like Claude. Every design decision, from markdown templates to the "Preamble to Claude" in our technical documentation, optimises for effective human-AI collaboration. Today, we'll explore how STP multiplies your development capabilities when working with AI assistants. - -## Intention-First LLM Collaboration - -Remember our [fundamental challenge](./0000-motivation-for-stp.md): LLMs perform sophisticated pattern matching without true understanding. They generate plausible code that might completely miss your actual needs. STP solves this by making intention explicit and structural. - -The transformation is dramatic: - -**Without STP**: "Build a caching system" → LLM guesses at requirements → Generic solution - -**With STP**: Steel thread with clear objectives → LLM understands constraints → Purpose-built solution - -### The Multiplier Effect - -When LLMs have access to clear intentions: -- **Context becomes meaningful** rather than just available -- **Suggestions align** with your actual goals, not assumed ones -- **Iterations decrease** because the LLM starts closer to the target -- **Quality improves** through understanding trade-offs and constraints - -This isn't about better prompts – it's about better context. STP provides that context systematically. - -## The LLM Collaboration Challenge - -Even powerful LLMs face fundamental challenges in software development collaboration: - -### Context Window Constraints - -LLMs have finite context windows. Dumping your entire codebase exceeds these limits and creates noise. The challenge: How do you provide enough context without overwhelming the model? - -### Information Overload - -More context isn't always better. 
LLMs can get lost in irrelevant details, missing the crucial information buried in thousands of lines of code. Quality beats quantity. - -### The Stale Context Problem - -Yesterday's context might mislead today's decisions. As code evolves, old assumptions become dangerous. Static documentation quickly becomes a liability rather than an asset. - -### Project Structure Complexity - -Explaining how different parts of your system interact requires more than showing code. LLMs need to understand relationships, dependencies, and architectural decisions. - -### Session Continuity - -Each new conversation starts fresh. Without systematic context management, you waste time re-explaining your project, and the LLM loses valuable understanding built in previous sessions. - -## How STP Is Designed for LLM Collaboration - -STP addresses each collaboration challenge through deliberate design choices: - -### The "Preamble to Claude" - -Our Technical Product Design starts with explicit instructions for LLMs: - -```markdown -## Preamble to Claude - -This document is a Technical Product Design (TPD) for the Steel Thread Process (STP) system. When processing this document, please understand: - -1. This is a comprehensive technical specification... -2. The system is designed to facilitate collaboration between developers and LLMs... -``` - -This isn't just documentation – it's a handshake protocol between human intent and AI understanding. 
- -### Structured Documentation That Fits LLM Thinking - -STP templates mirror how LLMs process information: -- **Clear hierarchies** that LLMs can navigate -- **Consistent patterns** that reduce parsing complexity -- **Explicit sections** for objectives, context, approach -- **Metadata frontmatter** for quick classification - -### Just-in-Time Context Loading - -Instead of overwhelming LLMs with everything, STP enables focused context: - -```bash -# Load specific steel thread context -$ cat stp/prj/st/ST0042.md - -# Show current tasks for that thread -$ stp task list ST0042 - -# Check implementation status -$ stp status show ST0042 -``` - -Each command provides exactly the context needed for the current task. - -### The Information Flow Architecture - -``` -WIP (Current Focus) - │ - ├──▶ Steel Threads (Intent & Strategy) - │ │ - │ └──▶ Tasks (Granular Work) - │ - └──▶ Journal (Historical Context) -``` - -This flow ensures LLMs always have: -1. Current focus (WIP) -2. Strategic context (Steel Threads) -3. Tactical details (Tasks) -4. Historical decisions (Journal) - -## Context Management Strategies - -Effective LLM collaboration requires strategic context management. Here's how STP enables it: - -### Start with WIP (Work In Progress) - -The WIP document acts as a conversation starter: - -```markdown -# Work in Progress - -## Current Focus -Implementing authentication system (ST0042) -- Decided on JWT tokens over sessions -- Need to handle refresh token rotation -- Integrating with existing user service - -## Blockers -- Unclear how to handle multi-device login -``` - -This immediately orients the LLM to your current state and challenges. 
- -### Use Steel Threads as Context Containers - -Each steel thread provides bounded context: - -```bash -# Share a complete context unit (main info file) -$ cat stp/prj/st/ST0042/info.md | pbcopy -# Now paste into LLM conversation - -# Or share specific aspects -$ cat stp/prj/st/ST0042/design.md | pbcopy # For design discussions -$ cat stp/prj/st/ST0042/impl.md | pbcopy # For implementation details -``` - -The LLM receives: -- Clear objectives -- Relevant constraints -- Design decisions -- Current progress - -### Progressive Context Loading - -Start minimal, add detail as needed: - -1. **Initial**: "Working on ST0042 - Authentication System" -2. **If needed**: Share the steel thread info.md or specific files -3. **For specifics**: Show relevant task details -4. **For history**: Reference Backlog task history - -This prevents context overload while ensuring completeness. - -### Task Status as Progress Indicators - -```bash -$ stp status show ST0042 -Steel Thread: ST0042 -Current Status: In Progress -Task Summary: - Total Tasks: 8 - - Done: 5 - - In Progress: 1 - - Todo: 2 -``` - -LLMs immediately understand what's complete and what needs attention. - -## Templates and Structure that Enhance LLM Effectiveness - -STP templates aren't arbitrary – they're designed to match how LLMs process information. - -### Why Structure Matters to LLMs - -LLMs excel at pattern recognition. 
STP v1.2.1+'s directory structure enhances this: - -``` -ST0042/ -├── info.md # LLM starts here for context -├── design.md # Share for architectural discussions -├── impl.md # Reference during coding -├── tasks.md # Track progress -└── results.md # Document outcomes -``` - -Each file serves a specific purpose in LLM conversations: - -```markdown -# info.md - Primary context ---- -verblock: "08 Mar 2025:v0.1: Author - Initial version" -stp_version: 1.2.1 -status: In Progress -created: 20250308 ---- -# ST0042: Authentication System - -## Objective -[LLMs immediately understand this is the goal] - -## Context -[LLMs know to find background information here] - -## Approach -[LLMs expect implementation strategy here] -``` - -The predictable structure reduces cognitive load and improves comprehension. - -### Frontmatter Metadata - -YAML frontmatter provides machine-readable context: - -```yaml ---- -status: In Progress # LLM knows work is active -created: 20250308 # LLM understands timeline -completed: # LLM sees this isn't done -author: Jane Smith # LLM knows who to reference -dependencies: [ST0038] # LLM understands relationships ---- -``` - -This metadata helps LLMs make contextual decisions without parsing prose. - -### Section Organisation for LLM Reasoning - -STP sections follow a logical flow that mirrors problem-solving: - -1. **Objective**: What are we trying to achieve? -2. **Context**: Why does this matter? -3. **Approach**: How will we solve it? -4. **Tasks**: What specific work is needed? -5. **Implementation Notes**: What have we learned? -6. **Results**: What was the outcome? - -This progression helps LLMs understand not just the current state but the journey. 
- -### Consistent Formatting Patterns - -STP uses consistent markers that LLMs can recognise: - -- `## Section Headers` for major divisions -- `- [ ] Task items` for work tracking -- `` ```language `` for code blocks -- `**Bold**` for emphasis -- `[Links](./file.md)` for relationships - -These patterns become navigational aids for LLM comprehension. - -## The Documentation-Implementation Feedback Loop - -STP creates a virtuous cycle where documentation and implementation reinforce each other, with LLMs participating at every stage. - -### Documentation Drives Implementation - -```mermaid -Documentation → LLM Understanding → Better Suggestions → Quality Code -``` - -When you start with clear documentation: -1. LLMs understand the complete context -2. Suggestions align with documented intent -3. Generated code fits the design -4. Implementation matches expectations - -### Implementation Updates Documentation - -As you code, discoveries flow back: - -```markdown -## Implementation Notes -[2024-03-15] Discovered rate limiting issue with auth tokens -[2024-03-16] Switched to sliding window approach -[2024-03-17] Added token bucket for burst capacity -``` - -LLMs learn from these updates, improving future suggestions. - -### Real Example: The Feedback Loop in Action - -**Initial Documentation**: -```markdown -## Approach -Implement simple cache with 1-hour TTL -``` - -**LLM Suggestion**: "Consider cache invalidation strategy for multi-region deployment" - -**Updated Documentation**: -```markdown -## Approach -Implement cache with 1-hour TTL -- Use event-based invalidation for consistency -- Redis pub/sub for multi-region coordination -``` - -**Result**: Better implementation informed by LLM insight, captured in documentation. 
- -### The Multiplier Effect - -Each cycle improves both documentation and code: -- **Clearer intent** → Better LLM suggestions -- **Better suggestions** → Improved implementation -- **Improved implementation** → Refined documentation -- **Refined documentation** → Even clearer intent - -This isn't just about current development – it's about building a knowledge base that makes every future interaction more effective. - -## Future Opportunities for LLM Integration - -We're just scratching the surface of what's possible when development methodologies embrace LLM collaboration. - -### Automated Documentation Validation - -LLMs could continuously validate documentation against implementation: - -```bash -$ stp validate ST0042 -Checking documentation-implementation alignment... -⚠ Implementation includes rate limiting not mentioned in approach -⚠ Task list shows 8 items but only 6 are documented -✓ All objectives have corresponding implementation -``` - -### LLM-Powered Steel Thread Creation - -Imagine describing a feature and having an LLM draft the steel thread: - -``` -You: "We need to add data export functionality for compliance" - -LLM: "I'll create a steel thread for this. 
Based on your project: -- Objective: Enable GDPR-compliant data export -- Context: Legal requirement, 30-day deadline -- Approach: Queue-based async processing -- Tasks: [generates task breakdown]" -``` - -### Intelligent Context Selection - -Future STP could automatically select relevant context: - -```bash -$ stp context "working on authentication" -Relevant context loaded: -- ST0042: Authentication System (current) -- ST0038: User Service (dependency) -- Journal: 2024-03-15 auth decisions -- Related tasks: task-45, task-46 -``` - -### The MCP Revolution - -Anthropic's Model Context Protocol opens new possibilities: -- LLMs directly executing STP commands -- Autonomous steel thread management -- Real-time documentation updates -- Integrated development environments - -## Practical LLM Collaboration Workflow - -Here's how STP transforms a typical development session: - -``` -┌───────────────────────────────────────────────────────────┐ -│ STP-Powered LLM Workflow │ -├──────────────────────────┬────────────────────────────────┤ -│ 1. Load Context │ $ cat stp/prj/wip.md │ -│ │ $ stp st show ST0042 │ -│ │ $ stp llm usage_rules │ -├──────────────────────────┼────────────────────────────────┤ -│ 2. Share with LLM │ "Working on ST0042, need help │ -│ │ with refresh token rotation" │ -├──────────────────────────┼────────────────────────────────┤ -│ 3. LLM Understands │ - Sees JWT token decision │ -│ │ - Knows security constraints │ -│ │ - Understands multi-device req │ -│ │ - Knows STP workflow patterns │ -├──────────────────────────┼────────────────────────────────┤ -│ 4. Targeted Solution │ LLM provides rotation strategy │ -│ │ aligned with your architecture │ -├──────────────────────────┼────────────────────────────────┤ -│ 5. 
Update Documentation │ Add decisions to steel thread │ -│ │ Update task status │ -└──────────────────────────┴────────────────────────────────┘ -``` - -### Leveraging Usage Rules for Better Collaboration - -STP now includes usage rules documentation specifically designed for LLMs: - -```bash -# Help LLMs understand STP workflows -stp llm usage_rules -``` - -This provides LLMs with: -- Command usage patterns and best practices -- Common workflows for steel thread management -- Task integration patterns with Backlog.md -- Guidelines for effective collaboration - -By sharing these usage rules at the start of a session, LLMs gain a deeper understanding of how to work within the STP framework, leading to more accurate suggestions and better alignment with your development workflow. - -## Transforming Development Through Collaboration - -STP doesn't just make LLM collaboration possible – it makes it powerful. By providing structure, context, and clear intention, STP transforms LLMs from code generators into true development partners. - -The future of software development isn't human or AI – it's human and AI, working together with shared understanding. STP provides the foundation for that collaboration. - -Ready to put this into practice? Our next post will guide you through setting up STP in your own projects and creating your first intention-aware, LLM-collaborative development workflow. 
- -[Continue to: Getting Started with STP →](./0005-getting-started-with-stp.md) diff --git a/stp/doc/blog/workflow-captures/blog-0000-complete.txt b/stp/doc/blog/workflow-captures/blog-0000-complete.txt deleted file mode 100644 index e1611fd..0000000 --- a/stp/doc/blog/workflow-captures/blog-0000-complete.txt +++ /dev/null @@ -1,6 +0,0 @@ -task-10 [done] ST0013 - Write 'The vision behind STP' section for blog 0000 -task-11 [done] ST0013 - Review and polish blog 0000 -task-12 [done] ST0013 - Update metadata and mark blog 0000 as complete -task-6 [done] ST0013 - Research existing docs for blog 0000 - Motivation for STP -task-7 [done] ST0013 - Write introduction section for blog 0000 -task-9 [done] ST0013 - Write 'Why existing approaches fall short' section for blog 0000 diff --git a/stp/doc/blog/workflow-captures/blog-0000-summary.txt b/stp/doc/blog/workflow-captures/blog-0000-summary.txt deleted file mode 100644 index b131307..0000000 --- a/stp/doc/blog/workflow-captures/blog-0000-summary.txt +++ /dev/null @@ -1,10 +0,0 @@ -# Blog 0000 Workflow Summary - -Completed tasks for blog post 0000: - -task-10 [done] ST0013 - Write 'The vision behind STP' section for blog 0000 -task-11 [done] ST0013 - Review and polish blog 0000 -task-12 [done] ST0013 - Update metadata and mark blog 0000 as complete -task-6 [done] ST0013 - Research existing docs for blog 0000 - Motivation for STP -task-7 [done] ST0013 - Write introduction section for blog 0000 -task-9 [done] ST0013 - Write 'Why existing approaches fall short' section for blog 0000 diff --git a/stp/doc/blog/workflow-captures/done-count.txt b/stp/doc/blog/workflow-captures/done-count.txt deleted file mode 100644 index d00491f..0000000 --- a/stp/doc/blog/workflow-captures/done-count.txt +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/stp/doc/blog/workflow-captures/initial-task-list.txt b/stp/doc/blog/workflow-captures/initial-task-list.txt deleted file mode 100644 index ca723f4..0000000 --- 
a/stp/doc/blog/workflow-captures/initial-task-list.txt +++ /dev/null @@ -1,62 +0,0 @@ -To Do: - task-5 - ST0014 - Add tests for the new directory structure - task-6 - ST0013 - Research existing docs for blog 0000 - Motivation for STP - task-7 - ST0013 - Write introduction section for blog 0000 - task-8 - ST0013 - Write 'Current challenges in development documentation' section for blog 0000 - task-9 - ST0013 - Write 'Why existing approaches fall short' section for blog 0000 - task-10 - ST0013 - Write 'The vision behind STP' section for blog 0000 - task-11 - ST0013 - Review and polish blog 0000 - task-12 - ST0013 - Update metadata and mark blog 0000 as complete - task-13 - ST0013 - Research existing intro content for blog 0001 - Introduction to STP - task-14 - ST0013 - Write opening hook for blog 0001 - task-15 - ST0013 - Write 'What is STP?' section for blog 0001 - task-16 - ST0013 - Write 'Core principles' section for blog 0001 - task-17 - ST0013 - Create ASCII diagram showing STP workflow for blog 0001 - task-18 - ST0013 - Write 'Why STP matters' section for blog 0001 - task-19 - ST0013 - Review and polish blog 0001 - task-20 - ST0013 - Update metadata and mark blog 0001 as complete - task-21 - ST0013 - Research steel thread examples for blog 0002 - task-22 - ST0013 - Write 'What is a steel thread?' 
section for blog 0002 - task-23 - ST0013 - Write 'Steel threads vs traditional methods' section for blog 0002 - task-24 - ST0013 - Create ASCII diagram of steel thread lifecycle for blog 0002 - task-25 - ST0013 - Write 'Benefits and examples' section for blog 0002 - task-26 - ST0013 - Review and polish blog 0002 - task-27 - ST0013 - Update metadata and mark blog 0002 as complete - task-28 - ST0013 - Research intent capture challenges for blog 0003 - task-29 - ST0013 - Write 'The intent problem' section for blog 0003 - task-30 - ST0013 - Write 'How STP captures intent' section for blog 0003 - task-31 - ST0013 - Write 'Intent and LLMs' section for blog 0003 - task-32 - ST0013 - Create ASCII diagram of intent flow for blog 0003 - task-33 - ST0013 - Write 'Practical examples' section for blog 0003 - task-34 - ST0013 - Review and polish blog 0003 - task-35 - ST0013 - Update metadata and mark blog 0003 as complete - task-36 - ST0013 - Research LLM collaboration patterns for blog 0004 - task-37 - ST0013 - Write 'STP design for LLMs' section for blog 0004 - task-38 - ST0013 - Write 'Context management' section for blog 0004 - task-39 - ST0013 - Write 'Templates and structure' section for blog 0004 - task-40 - ST0013 - Create ASCII diagram of LLM workflow for blog 0004 - task-41 - ST0013 - Write 'Future opportunities' section for blog 0004 - task-42 - ST0013 - Review and polish blog 0004 - task-43 - ST0013 - Update metadata and mark blog 0004 as complete - task-44 - ST0013 - Write 'Installation' section for blog 0005 - task-45 - ST0013 - Write 'Basic commands' section for blog 0005 - task-46 - ST0013 - Write 'Creating your first steel thread' section for blog 0005 - task-47 - ST0013 - Write 'STP+Backlog workflow' meta-section for blog 0005 - task-48 - ST0013 - Add real command outputs from blog writing process for blog 0005 - task-49 - ST0013 - Write 'Best practices' section for blog 0005 - task-50 - ST0013 - Review and polish blog 0005 - task-51 - ST0013 - Update metadata 
and mark blog 0005 as complete - task-52 - ST0013 - Write 'Current state of STP' section for blog 0006 - task-53 - ST0013 - Write 'Lessons learned' section for blog 0006 - task-54 - ST0013 - Write 'Roadmap' section for blog 0006 - task-55 - ST0013 - Write 'Integration opportunities' section for blog 0006 - task-56 - ST0013 - Write 'Community and contributions' section for blog 0006 - task-57 - ST0013 - Review and polish blog 0006 - task-58 - ST0013 - Update metadata and mark blog 0006 as complete - -done: - task-1 - ST0014 - Create the required directory structure - task-2 - ST0014 - Update steel threads index with new directory info - task-3 - ST0014 - Implement organize_st.sh script - task-4 - ST0014 - Update upgrade script to run organize_st.sh - diff --git a/stp/doc/blog/workflow-captures/progress-after-blog-0001.txt b/stp/doc/blog/workflow-captures/progress-after-blog-0001.txt deleted file mode 100644 index 1acf61e..0000000 --- a/stp/doc/blog/workflow-captures/progress-after-blog-0001.txt +++ /dev/null @@ -1 +0,0 @@ - 0 diff --git a/stp/doc/blog/workflow-captures/st0013-done-count.txt b/stp/doc/blog/workflow-captures/st0013-done-count.txt deleted file mode 100644 index 8351c19..0000000 --- a/stp/doc/blog/workflow-captures/st0013-done-count.txt +++ /dev/null @@ -1 +0,0 @@ -14 diff --git a/stp/doc/blog/workflow-captures/st0013-task-7-in-progress.txt b/stp/doc/blog/workflow-captures/st0013-task-7-in-progress.txt deleted file mode 100644 index 4cdf379..0000000 --- a/stp/doc/blog/workflow-captures/st0013-task-7-in-progress.txt +++ /dev/null @@ -1,10 +0,0 @@ -Tasks for ST0013: -================ -task-10 [todo] ST0013 - Write 'The vision behind STP' section for blog 0000 -task-11 [todo] ST0013 - Review and polish blog 0000 -task-12 [todo] ST0013 - Update metadata and mark blog 0000 as complete -task-13 [todo] ST0013 - Research existing intro content for blog 0001 - Introduction to STP -task-14 [todo] ST0013 - Write opening hook for blog 0001 -task-15 [todo] ST0013 - 
Write 'What is STP?' section for blog 0001 -task-16 [todo] ST0013 - Write 'Core principles' section for blog 0001 -task-17 [todo] ST0013 - Create ASCII diagram showing STP workflow for blog 0001 diff --git a/stp/doc/blog/workflow-captures/st0013-tasks-initial.txt b/stp/doc/blog/workflow-captures/st0013-tasks-initial.txt deleted file mode 100644 index 8a029b3..0000000 --- a/stp/doc/blog/workflow-captures/st0013-tasks-initial.txt +++ /dev/null @@ -1,54 +0,0 @@ -Tasks for ST0013: -================ -task-10 [todo] ST0013 - Write 'The vision behind STP' section for blog 0000 -task-11 [todo] ST0013 - Review and polish blog 0000 -task-12 [todo] ST0013 - Update metadata and mark blog 0000 as complete -task-13 [todo] ST0013 - Research existing intro content for blog 0001 - Introduction to STP -task-14 [todo] ST0013 - Write opening hook for blog 0001 -task-15 [todo] ST0013 - Write 'What is STP?' section for blog 0001 -task-16 [todo] ST0013 - Write 'Core principles' section for blog 0001 -task-17 [todo] ST0013 - Create ASCII diagram showing STP workflow for blog 0001 -task-18 [todo] ST0013 - Write 'Why STP matters' section for blog 0001 -task-19 [todo] ST0013 - Review and polish blog 0001 -task-20 [todo] ST0013 - Update metadata and mark blog 0001 as complete -task-21 [todo] ST0013 - Research steel thread examples for blog 0002 -task-22 [todo] ST0013 - Write 'What is a steel thread?' 
section for blog 0002 -task-23 [todo] ST0013 - Write 'Steel threads vs traditional methods' section for blog 0002 -task-24 [todo] ST0013 - Create ASCII diagram of steel thread lifecycle for blog 0002 -task-25 [todo] ST0013 - Write 'Benefits and examples' section for blog 0002 -task-26 [todo] ST0013 - Review and polish blog 0002 -task-27 [todo] ST0013 - Update metadata and mark blog 0002 as complete -task-28 [todo] ST0013 - Research intent capture challenges for blog 0003 -task-29 [todo] ST0013 - Write 'The intent problem' section for blog 0003 -task-30 [todo] ST0013 - Write 'How STP captures intent' section for blog 0003 -task-31 [todo] ST0013 - Write 'Intent and LLMs' section for blog 0003 -task-32 [todo] ST0013 - Create ASCII diagram of intent flow for blog 0003 -task-33 [todo] ST0013 - Write 'Practical examples' section for blog 0003 -task-34 [todo] ST0013 - Review and polish blog 0003 -task-35 [todo] ST0013 - Update metadata and mark blog 0003 as complete -task-36 [todo] ST0013 - Research LLM collaboration patterns for blog 0004 -task-37 [todo] ST0013 - Write 'STP design for LLMs' section for blog 0004 -task-38 [todo] ST0013 - Write 'Context management' section for blog 0004 -task-39 [todo] ST0013 - Write 'Templates and structure' section for blog 0004 -task-40 [todo] ST0013 - Create ASCII diagram of LLM workflow for blog 0004 -task-41 [todo] ST0013 - Write 'Future opportunities' section for blog 0004 -task-42 [todo] ST0013 - Review and polish blog 0004 -task-43 [todo] ST0013 - Update metadata and mark blog 0004 as complete -task-44 [todo] ST0013 - Write 'Installation' section for blog 0005 -task-45 [todo] ST0013 - Write 'Basic commands' section for blog 0005 -task-46 [todo] ST0013 - Write 'Creating your first steel thread' section for blog 0005 -task-47 [todo] ST0013 - Write 'STP+Backlog workflow' meta-section for blog 0005 -task-48 [todo] ST0013 - Add real command outputs from blog writing process for blog 0005 -task-49 [todo] ST0013 - Write 'Best practices' 
section for blog 0005 -task-50 [todo] ST0013 - Review and polish blog 0005 -task-51 [todo] ST0013 - Update metadata and mark blog 0005 as complete -task-52 [todo] ST0013 - Write 'Current state of STP' section for blog 0006 -task-53 [todo] ST0013 - Write 'Lessons learned' section for blog 0006 -task-54 [todo] ST0013 - Write 'Roadmap' section for blog 0006 -task-55 [todo] ST0013 - Write 'Integration opportunities' section for blog 0006 -task-56 [todo] ST0013 - Write 'Community and contributions' section for blog 0006 -task-57 [todo] ST0013 - Review and polish blog 0006 -task-58 [todo] ST0013 - Update metadata and mark blog 0006 as complete -task-6 [todo] ST0013 - Research existing docs for blog 0000 - Motivation for STP -task-7 [todo] ST0013 - Write introduction section for blog 0000 -task-9 [todo] ST0013 - Write 'Why existing approaches fall short' section for blog 0000 diff --git a/stp/eng/prompts/regenerate_usage_rules.md b/stp/eng/prompts/regenerate_usage_rules.md deleted file mode 100644 index 2d25b6f..0000000 --- a/stp/eng/prompts/regenerate_usage_rules.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 ---- -# Prompt to Regenerate STP Usage Rules - -Use this prompt to regenerate the `stp/eng/usage-rules.md` document when STP is updated with new features or commands. - -## The Prompt - -You need to create a usage-rules.md document for the Steel Thread Process (STP) system. This document should follow the pattern established by the Elixir Hex package `usage_rules` (see https://hexdocs.pm/usage_rules/readme.html). - -### Context - -STP is a structured development and documentation system designed for collaboration between developers and Large Language Models (LLMs). The usage-rules.md document should help LLMs understand how to effectively use STP commands and workflows. - -### Requirements - -1. 
**Focus on Usage, Not Implementation** - - Document HOW to use STP, not how it works internally - - Emphasize workflows and practical patterns - - Provide clear examples of command usage - -2. **Document Structure** - Follow this structure: - - Introduction (brief overview of what STP is for) - - Core Workflows (common patterns for using STP) - - Command Usage Patterns (practical usage for each command) - - Steel Thread Workflows (creating and managing steel threads) - - Task Management Integration (using Backlog.md through STP) - - LLM Collaboration Patterns (best practices for AI assistance) - - Further Reading (references to blog posts) - -3. **Information Gathering** - To create the document, you should: - - Run `stp help` to see all available commands - - Run `stp help <command>` for each command to understand its usage - - Review `stp/usr/user_guide.md` for task-based workflows - - Review `stp/usr/reference_guide.md` for comprehensive command details - - Check `stp/doc/blog/` directory for conceptual blog posts - - Look at `CLAUDE.md` for current project conventions - -4. **Writing Style** - - Be concise but comprehensive - - Use practical examples - - Focus on patterns and workflows - - Write for LLM consumption (clear, structured, unambiguous) - - Include command examples with expected outputs - - Highlight common mistakes and how to avoid them - -5. **Key Patterns to Document** - - Starting a new project with STP - - Creating and managing steel threads - - Using the task management integration - - Synchronizing steel thread status with tasks - - Upgrading STP files to new versions - - Working with LLMs using STP structure - -6. 
**Blog Post References** - Include strategic references to these blog posts for deeper understanding: - - `0000-motivation-for-stp.md` - Why intention matters - - `0001-introduction-to-stp.md` - Overview of the system - - `0002-the-steel-thread-methodology.md` - Understanding steel threads - - `0003-intent-capture-in-software-development.md` - Philosophy behind STP - - `0004-llm-collaboration-with-stp.md` - Working with AI assistants - - `0005-getting-started-with-stp.md` - Practical tutorial - -7. **Output Location** - Save the generated document to: `stp/eng/usage-rules.md` - -### Example Section Format - -Here's an example of how to format a section: - -```markdown -## Creating a New Steel Thread - -Steel threads are the core unit of work in STP. Here's how to create and manage them effectively: - -### Basic Creation - -```bash -stp st new "Implement user authentication" -``` - -This creates a new steel thread with: -- Auto-generated ID (e.g., ST0015) -- Template structure for documentation -- Status set to "Not Started" - -### Best Practices - -1. **Clear Titles**: Use descriptive, action-oriented titles -2. **One Feature Per Thread**: Keep threads focused on single features -3. **Document Intent**: Fill in the Intent section immediately - -### Common Mistakes - -- Creating threads that are too broad -- Forgetting to update status as work progresses -- Not linking tasks to threads - -### Integration with Tasks - -After creating a steel thread, create linked tasks: - -```bash -stp task create ST0015 "Design authentication schema" -stp task create ST0015 "Implement login endpoint" -``` - -For deeper understanding of the steel thread methodology, see the blog post on [The Steel Thread Methodology](../doc/blog/0002-the-steel-thread-methodology.md). -``` - -Remember: The goal is to help LLMs understand how to use STP effectively in real development scenarios. 
\ No newline at end of file diff --git a/stp/eng/usage-rules.md b/stp/eng/usage-rules.md deleted file mode 100644 index 8b91dec..0000000 --- a/stp/eng/usage-rules.md +++ /dev/null @@ -1,423 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.2: Matthew Sinclair - Updated for steel thread directory structure" -stp_version: 1.2.1 ---- -# STP Usage Rules - -This document provides usage patterns and guidelines for working with the Steel Thread Process (STP) system. It's designed to help Large Language Models (LLMs) understand how to effectively use STP commands and workflows in development scenarios. - -## Introduction - -STP (Steel Thread Process) is a structured development system that facilitates collaboration between developers and LLMs through: - -- **Steel Threads**: Self-contained units of work with clear intent, organized as directories (v1.2.1+) -- **Structured Documentation**: Templates that capture context and decisions across multiple files -- **Task Integration**: Fine-grained task management linked to larger goals -- **Intent Preservation**: Methodologies for maintaining project context - -## Core Workflows - -### Starting a New STP Project - -```bash -# Initialize STP in a new project -stp init "My Project" ./my-project - -# Or initialize in current directory -stp init "My Project" -``` - -This creates the STP directory structure and essential files: - -- `stp/` - Main documentation directory -- `CLAUDE.md` - Project-specific instructions for LLMs -- Initial templates and documentation - -### Daily Development Workflow - -1. **Check Current Work** - - ```bash - # View current work in progress - cat stp/prj/wip.md - - # List active steel threads - stp st list --status "In Progress" - ``` - -2. **Update Task Status** - - ```bash - # Check tasks for a steel thread - stp task list ST0014 - - # View task board - stp bl board - ``` - -3. 
**Document Progress** - - Update `stp/prj/wip.md` with current focus - - Mark completed tasks: `stp bl task edit <task-id>` - - Update steel thread status if needed - -## Command Usage Patterns - -### Steel Thread Management (`stp st`) - -Steel threads are the backbone of STP methodology. They represent coherent units of work with clear intent. - -#### Creating Steel Threads - -```bash -# Create a new steel thread directory -stp st new "Implement OAuth2 authentication" -# Output: Created new steel thread: ST0015 -``` - -This creates a directory structure: -``` -ST0015/ -├── info.md # Main information and metadata -├── design.md # Design decisions -├── impl.md # Implementation details -├── tasks.md # Task tracking -└── results.md # Results and outcomes -``` - -**Best Practices:** - -- Use clear, action-oriented titles -- One feature or fix per thread -- Create thread before starting work -- Start documenting in info.md immediately - -#### Managing Steel Thread Lifecycle - -```bash -# List all threads -stp st list - -# Filter by status -stp st list --status "In Progress" - -# View thread details (shows info.md by default) -stp st show ST0015 - -# View specific file -stp st show ST0015 design # Show design.md -stp st show ST0015 impl # Show impl.md -stp st show ST0015 all # Show all files - -# Edit thread files -stp st edit ST0015 # Edit info.md (default) -stp st edit ST0015 design # Edit design.md -stp st edit ST0015 impl # Edit impl.md - -# Mark as complete -stp st done ST0015 -``` - -#### Synchronizing Thread Index - -```bash -# Preview synchronization -stp st sync - -# Write updates to steel_threads.md -stp st sync --write -``` - -### Task Management Integration (`stp task`, `stp bl`) - -STP integrates with Backlog.md for fine-grained task tracking while maintaining the high-level steel thread structure. 
- -#### Task Creation and Management - -```bash -# Create tasks linked to a steel thread -stp task create ST0015 "Design database schema" -stp task create ST0015 "Implement login endpoint" -stp task create ST0015 "Add session management" - -# List tasks for a thread -stp task list ST0015 -``` - -#### Using the Backlog Wrapper - -```bash -# Initialize backlog (one-time setup) -stp bl init - -# List all tasks (without git errors) -stp bl list - -# View kanban board -stp bl board - -# Edit a specific task -stp bl task edit task-5 -``` - -**Why use `stp bl` instead of `backlog` directly?** - -- Prevents git fetch errors in local projects -- Adds `--plain` flag automatically -- Configured for STP workflow - -### Status Synchronization (`stp status`) - -Keep steel thread status in sync with task completion: - -```bash -# Update thread status based on task completion -stp status sync ST0015 - -# Show status summary -stp status show ST0015 -``` - -Status rules: - -- 0% tasks complete → "Not Started" -- 1-99% complete → "In Progress" -- 100% complete → "Completed" - -### Upgrading STP Files (`stp upgrade`) - -Keep your STP installation current: - -```bash -# Upgrade all STP files to latest format -stp upgrade - -# Force upgrade even with major version changes -stp upgrade --force -``` - -This command: - -- Updates file metadata -- Adds missing fields -- Migrates steel threads to directory structure (v1.2.0 → v1.2.1) -- Synchronizes steel thread index -- Reports all changes - -**v1.2.1 Migration:** - -When upgrading from v1.2.0 to v1.2.1, the upgrade command will: -- Convert single ST####.md files to ST####/ directories -- Split content into appropriate files (info.md, design.md, etc.) -- Create backups in .stp_backup/1.2.1/ -- Maintain all existing content and metadata - -## Steel Thread Workflows - -### Complete Steel Thread Workflow - -1. **Create Thread** - - ```bash - stp st new "Add user profile management" - ``` - -2. 
**Document Intent** - - ```bash - # Edit the main info file - stp st edit ST0016 - ``` - - In info.md: - - Fill in the Objective section immediately - - Document constraints and assumptions in Context - - Note any relevant background - - In design.md (when ready): - ```bash - stp st edit ST0016 design - ``` - - Document approach and architecture - - Capture key design decisions - - Note alternatives considered - -3. **Break Down into Tasks** - - ```bash - stp task create ST0016 "Design profile data model" - stp task create ST0016 "Create profile API endpoints" - stp task create ST0016 "Build profile UI components" - stp task create ST0016 "Add profile tests" - ``` - -4. **Track Progress** - - ```bash - # Start work on a task - stp bl task edit task-10 # Change status to "In Progress" - - # Check thread status - stp status show ST0016 - ``` - -5. **Complete Thread** - - ```bash - # When all tasks are done - stp st done ST0016 - - # Sync the index - stp st sync --write - ``` - -### Migrating Embedded Tasks - -For steel threads with tasks listed in the document: - -```bash -# Migrate embedded tasks to Backlog -stp migrate ST0014 - -# This extracts tasks and creates them in Backlog -# Original tasks are preserved in an archive section -``` - -## LLM Collaboration Patterns - -### Session Initialization - -When starting an LLM session: - -1. LLM reads `CLAUDE.md` for project context -2. LLM reads `stp/eng/tpd/technical_product_design.md` -3. LLM checks `stp/prj/wip.md` for current work -4. 
LLM can run `stp st list --status "In Progress"` - -### Working on a Steel Thread - -When an LLM is assigned to work on a steel thread: - -```bash -# First, understand the thread -stp st show ST0015 - -# Check existing tasks -stp task list ST0015 - -# View detailed task information -stp bl list | grep "ST0015" - -# Create new tasks as needed -stp task create ST0015 "Additional task discovered" -``` - -### Maintaining Context - -- Update `stp/prj/wip.md` when starting/stopping work -- Document decisions in steel thread files -- Keep `CLAUDE.md` updated with project conventions -- End sessions by updating task status in Backlog - -## Common Patterns and Anti-Patterns - -### Good Patterns - -✅ **Create Thread First, Then Tasks** - -```bash -stp st new "Feature X" -stp task create ST0017 "Task 1" -stp task create ST0017 "Task 2" -``` - -✅ **Regular Status Syncs** - -```bash -# After completing tasks -stp status sync ST0017 -stp st sync --write -``` - -✅ **Document Intent Immediately** - -- Fill in Intent section when creating thread -- Capture "why" not just "what" - -### Anti-Patterns - -❌ **Creating Overly Broad Threads** - -- Bad: "Improve application" -- Good: "Add input validation to user forms" - -❌ **Skipping Status Updates** - -- Threads show wrong status -- Team loses visibility - -❌ **Working Without Threads** - -- Lost context and intent -- No clear completion criteria - -## Integration Best Practices - -### With Git - -```bash -# Good commit messages reference threads -git commit -m "ST0015: Implement login endpoint" - -# Include thread ID in PR titles -gh pr create --title "ST0015: OAuth2 Authentication" -``` - -### With Documentation - -- Keep Backlog tasks updated with detailed progress -- Link blog posts from thread documents -- Update user guides when adding features - -### With CI/CD - -- Run `stp st sync --write` in CI to catch inconsistencies -- Validate thread status matches task completion -- Check for incomplete threads before release - -## 
Further Reading - -For deeper understanding of STP concepts and philosophy: - -- [Motivation for STP](../doc/blog/0000-motivation-for-stp.md) - Why intention matters in software -- [Introduction to STP](../doc/blog/0001-introduction-to-stp.md) - System overview and benefits -- [The Steel Thread Methodology](../doc/blog/0002-the-steel-thread-methodology.md) - Deep dive into steel threads -- [Intent Capture in Software Development](../doc/blog/0003-intent-capture-in-software-development.md) - Techniques for preserving context -- [LLM Collaboration with STP](../doc/blog/0004-llm-collaboration-with-stp.md) - Working effectively with AI -- [Getting Started with STP](../doc/blog/0005-getting-started-with-stp.md) - Practical tutorial - -## Quick Reference Card - -```bash -# Initialize -stp init "Project Name" - -# Create work -stp st new "Feature description" -stp task create ST#### "Task description" - -# Track progress -stp st list --status "In Progress" -stp task list ST#### -stp bl board - -# Update status -stp bl task edit task-id -stp status sync ST#### -stp st done ST#### - -# Maintain system -stp upgrade -stp st sync --write -``` - -Remember: STP is about capturing intent and maintaining context throughout development. Use it to create a clear narrative of your project's evolution. diff --git a/stp/llm/llm_preamble.md b/stp/llm/llm_preamble.md deleted file mode 100644 index ab47559..0000000 --- a/stp/llm/llm_preamble.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 ---- -# LLM Preamble - -This document provides essential context for LLMs working with the Steel Thread Process (STP) codebase. - -## Project Overview - -STP is a system designed to create a structured workflow and documentation process for developers collaborating with Large Language Models like you. It provides: - -1. A standardized directory structure for project documentation -2. 
Shell scripts for managing project workflows -3. A methodology centered around "steel threads" - self-contained units of work -4. Markdown templates for documentation -5. Testing frameworks for ensuring reliability - -The system is intentionally lightweight, using only shell scripts and markdown files to maximize portability and minimize dependencies. It integrates with existing development workflows and helps preserve context across development sessions with LLMs. - -## Navigation Guide - -When working with this repository, you should focus on these key documents in this specific order: - -1. **START HERE**: `stp/eng/tpd/technical_product_design.md` - Contains comprehensive information about the project vision, architecture and current state. Pay special attention to the "Preamble to Claude" section. - -2. **NEXT**: `stp/prj/st/steel_threads.md` - Provides a complete index of all steel threads with their status. Review this to understand what work has been completed and what remains. - -3. **THEN**: `stp/prj/wip.md` - Details the current work in progress and priorities. This is your guide to what should be worked on now. - -4. **FINALLY**: Use `stp bl list` and steel thread documents to review historical work completed. Backlog tasks provide detailed progress tracking. - -## Key System Components - -The STP system consists of these main components: - -1. **Core Script Framework**: Shell scripts in `stp/bin/` that manage steel threads and documentation workflow - - `stp` - Main entry point script - - `stp_init` - Initializes a new STP project - - `stp_st` - Manages steel threads (new, list, show, done) - - `stp_help` - Provides help information - -2. **Documentation Structure**: Organized markdown files in specific directories - - `stp/prj/` - Project management documents - - `stp/eng/` - Engineering documentation - - `stp/usr/` - User documentation - - `stp/llm/` - LLM-specific content (like this preamble) - -3. 
**Test Suite**: BATS-based tests in `stp/tests/` that verify functionality - - Tests for core scripts, initialization, and steel thread management - -## Current Status - -The STP system has completed 11 steel threads so far, implementing all core functionality: - -- Directory structure ✓ -- Core script framework ✓ -- Template system ✓ -- Steel thread commands ✓ -- Initialization ✓ -- Help system ✓ -- User documentation ✓ -- LLM integration ✓ -- Process refinement ✓ -- Test suite implementation ✓ - -Future work (potential new steel threads) may include: - -- Anthropic MCP integration -- CI/CD integration for automated testing -- Configuration commands for customizing STP behavior -- Enhanced version control integration - -## Development Guidelines - -1. **Code Style**: - - Use 2-space indentation in any programming language - - Follow language-specific conventions as noted in CLAUDE.md - - Maintain POSIX compatibility for scripts to ensure cross-platform support - -2. **Documentation**: - - Keep markdown documents consistently formatted - - Update documentation as part of any implementation work - - Follow the verblock pattern for versioning (`verblock: "DD MMM YYYY:vX.Y: Author - Note"`) - -3. **Steel Thread Process**: - - Work is organized into steel threads (ST####) - - Steel threads have states: Not Started, In Progress, Completed, On Hold, Cancelled - - Each steel thread has its own markdown document in `stp/prj/st/` - -## How to Help - -When assisting with this project, you should: - -1. First, understand the current context by reviewing the documents in the order specified -2. Focus on the work in progress as defined in `stp/prj/wip.md` -3. Maintain consistency with existing patterns and documentation standards -4. Update documentation alongside code changes -5. When suggesting improvements, reference relevant architectural patterns -6. 
Update task status in Backlog to track progress - -Most tasks will involve implementing new functionality, enhancing existing features, or improving documentation within the steel thread framework. If needed, use the shell scripts to create or update steel threads. diff --git a/stp/prj/archive/journal-deprecated.md b/stp/prj/archive/journal-deprecated.md deleted file mode 100644 index b8db1ef..0000000 --- a/stp/prj/archive/journal-deprecated.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.3: Matthew Sinclair - Deprecated in favor of Backlog task tracking" -stp_version: 1.2.0 ---- -# Project Journal (DEPRECATED) - -> **⚠️ DEPRECATION NOTICE**: As of July 9, 2025, the journal.md file has been deprecated. -> Historical tracking and project narrative are now maintained through Backlog tasks and steel threads. -> This file is preserved for historical reference only. -> -> For ongoing work tracking, use: -> - `stp bl` commands for task management -> - Steel threads for high-level intent and context -> - Backlog tasks for detailed work history - ---- - -# Original Project Journal Content - -This document maintains a chronological record of project activities, decisions, and progress. It serves as a historical narrative of the Steel Thread Process's development. 
- -## 20250709 - -### LLM Usage Rules Implementation - -Implemented comprehensive usage rules documentation and tooling for LLM integration: - -Key accomplishments: - -- Created `stp/eng/usage-rules.md` following the Elixir Hex package 'usage_rules' pattern -- Implemented `stp llm` command with `usage_rules` subcommand to display the documentation -- Added `--symlink` option to create symlinks for integration with projects expecting usage-rules.md files -- Renamed file from `usage_rules.md` to `usage-rules.md` to follow established conventions -- Created comprehensive test suite for the new llm command -- Updated all documentation to reference the new command - -The usage rules document provides: - -- Detailed command usage patterns and workflows -- Best practices for steel thread management -- Task integration patterns with Backlog.md -- LLM collaboration guidelines -- Common patterns and anti-patterns - -The `--symlink` option enables easy integration with Elixir projects or other tools that look for usage-rules.md files, creating a symlink in the current or specified directory. - -## 20250320 - -### Backlog.md Integration - -Successfully integrated Backlog.md with STP to provide fine-grained task management capabilities. This integration allows developers to track detailed tasks linked to steel threads, providing better visibility into work progress and enabling automatic status synchronization. 
- -Key accomplishments: - -- Created `stp bl` wrapper to provide a streamlined interface to Backlog.md while avoiding git fetch errors -- Implemented `stp task` command for creating and listing tasks linked to specific steel threads -- Implemented `stp status` command to synchronize steel thread status based on task completion metrics -- Implemented `stp migrate` command to help users migrate existing embedded checkbox tasks to Backlog -- Created comprehensive test suites for all new commands -- Updated all documentation (user guide, reference guide, deployment guide, TPD) to reflect the integration - -The integration follows STP naming conventions with tasks automatically named in the format "ST#### - Description" for clear traceability. The `stp bl` wrapper configures Backlog for local use, disabling remote operations that can cause errors. - -This enhancement provides: - -- Granular task tracking that complements high-level steel threads -- Automatic status updates based on task completion -- Seamless migration path from embedded tasks -- Error-free operation through the wrapper interface - -## 20250603 - -### Documentation Update - -Updated all project documentation to reflect the current state: - -- Updated steel_threads.md to show all completed steel threads and ST0010 (MCP Integration) as on hold -- Updated wip.md with a new "Completed Steel Threads" section to provide better visibility -- Revised "Next Steps" section to focus on potential future enhancements - -### Test Suite Implementation (ST0011) - -Completed the comprehensive test suite for STP using the Bats (Bash Automated Testing System) framework. 
Created tests for: - -- Bootstrap script: Verifies directory structure and file creation -- Init command: Tests project initialization with various parameters -- Steel thread commands: Tests creation, listing, displaying, and completion of steel threads -- Help command: Tests help system functionality -- Main script: Tests core command dispatcher - -The test architecture includes: - -- Isolated test environments using temporary directories -- Custom assertions for file system operations -- Mock functions for simulating various environments -- Test helper library for common functions -- Scripts for running tests and setting up the test environment - -This implementation establishes a foundation for ensuring ongoing reliability of STP as new features are added. Only remaining task is setting up continuous integration for automated testing. - -### New Steel Thread Creation - -Created two new steel threads based on emergent needs: - -- ST0010: Anthropic MCP Integration - For exploring the use of Anthropic's Machine Control Protocol in STP scripts. This work is currently on hold and can be addressed later. -- ST0011: Test Suite Implementation - For building the automated test framework. This steel thread has been completed. - -### Directory Naming Update - -Completed the migration from the old "doc" directory reference to the new "stp" directory name in all scripts and documentation files. This ensures consistency throughout the codebase. - -## 20250306 - -### Project Initialization - -The Steel Thread Process (STP) was initiated today. The goal is to create a structured workflow and documentation process for developers working collaboratively with Large Language Models (LLMs). - -### Specification Review - -Reviewed the initial specification for STP. The system will consist of three main components: - -1. Templates: Markdown-based document templates in a structured directory layout -2. Scripts: Shell scripts for managing STP workflows -3. 
Process: Guidelines and instructions for the steel thread methodology - -### Directory Structure Design - -Created the initial directory structure for STP. Decided to use "eng" instead of "des" for engineering documentation to better reflect the content. The structure includes: - -- prj/: Project documentation -- eng/: Engineering documentation -- usr/: User documentation -- llm/: LLM-specific content -- bin/: STP scripts -- _templ/: Templates - -### Technical Product Design - -Created the Technical Product Design (TPD) document to serve as the central specification for STP. The TPD is organized into multiple sections for better organization and maintenance: - -1. Introduction: Purpose, scope, and overview -2. Requirements: Functional and non-functional requirements -3. Architecture: System design and component interactions -4. Detailed Design: Implementation specifications -5. Implementation Strategy: Development approach using steel threads -6. Deployment and Operations: Installation and usage -7. Technical Challenges and Mitigations: Risk analysis -8. Appendices: Supporting information - -### Steel Thread Planning - -Identified initial steel threads for STP implementation: - -- ST0001: Directory Structure - Create the foundational directory layout -- ST0002: Core Script Framework - Implement the main STP script -- ST0003: Template System - Create document templates -- ST0004: Steel Thread Commands - Implement thread management -- ST0005: Initialization Command - Create project initialization -- ST0006: Help System - Implement help and documentation -- ST0007: User Documentation - Create user guides -- ST0008: LLM Integration - Develop LLM-specific features -- ST0009: Process Refinement - Refine based on usage - -### Bootstrap Script - -Created the bootstrap script to automate STP setup. 
The script: - -- Creates the complete directory structure -- Copies templates to appropriate locations -- Initializes the TPD with all sections -- Sets up executable permissions for scripts - -This provides a quick way to set up the STP development environment. - -### Next Steps - -Tomorrow will focus on implementing ST0001 (Directory Structure) and ST0002 (Core Script Framework). The goal is to have a working prototype of the main STP script that can dispatch commands to sub-scripts. \ No newline at end of file diff --git a/stp/prj/st/COMPLETED/ST0001/results.md b/stp/prj/st/COMPLETED/ST0001/results.md deleted file mode 100644 index e0820b6..0000000 --- a/stp/prj/st/COMPLETED/ST0001/results.md +++ /dev/null @@ -1,14 +0,0 @@ -# Results - ST0001: Directory Structure - -## Results - -The directory structure for the STP system was successfully implemented with all planned components: - -- Created the main directory hierarchy for documentation, templates, and scripts -- Established clear separation between template files and active project files -- Created placeholder files to maintain directory structure -- Implemented a logical organization that supports the STP workflow -- Added appropriate naming conventions for consistency - -The directory structure provides a solid foundation for the STP system, enabling all other steel threads to build upon this organizational framework. The structure is intuitive for users and supports the various document types and workflows necessary for the system. 
- diff --git a/stp/prj/st/COMPLETED/ST0002/results.md b/stp/prj/st/COMPLETED/ST0002/results.md deleted file mode 100644 index b084800..0000000 --- a/stp/prj/st/COMPLETED/ST0002/results.md +++ /dev/null @@ -1,16 +0,0 @@ -# Results - ST0002: Core Script Framework - -## Results - -The core script framework was successfully implemented with all planned components: - -- Created a modular command dispatching system -- Implemented environment variable handling and configuration -- Created a robust error handling framework -- Built a help system for documentation access -- Implemented core commands (init, st, help) -- Tested functionality across different environments -- Added verbose mode for debugging and troubleshooting - -The implementation provides a solid foundation for the STP command-line interface, with a modular design that makes it easy to add new commands and extend functionality. The error handling is robust, providing clear messages to users when issues occur. The command dispatching system efficiently routes commands to their specific implementation scripts, maintaining a clean separation of concerns. - diff --git a/stp/prj/st/COMPLETED/ST0003/results.md b/stp/prj/st/COMPLETED/ST0003/results.md deleted file mode 100644 index e8d8770..0000000 --- a/stp/prj/st/COMPLETED/ST0003/results.md +++ /dev/null @@ -1,13 +0,0 @@ -# Results - ST0003: Template System - -## Results - -The template system was successfully implemented with templates for all document types. The system provides: - -- Consistent document formats across the project -- Clear guidance to users on what information to include -- LLM-specific sections to aid in document generation and maintenance -- A logical organization that mirrors the final document structure - -The template system serves as a strong foundation for the STP project, ensuring documentation consistency and completeness. 
- diff --git a/stp/prj/st/COMPLETED/ST0004/results.md b/stp/prj/st/COMPLETED/ST0004/results.md deleted file mode 100644 index 855b36f..0000000 --- a/stp/prj/st/COMPLETED/ST0004/results.md +++ /dev/null @@ -1,14 +0,0 @@ -# Results - ST0004: Steel Thread Commands - -## Results - -The steel thread command subsystem was successfully implemented with all planned functionality. The system provides: - -- A user-friendly interface for managing steel threads -- Automatic generation of steel thread documents from templates -- Consistent tracking of steel thread status and metadata -- Cross-platform support for editing and viewing threads -- A formatted display of steel thread listings with proper headers - -The implementation helps enforce consistency in steel thread documentation while making it easy for users to create and manage threads throughout a project lifecycle. - diff --git a/stp/prj/st/COMPLETED/ST0005/results.md b/stp/prj/st/COMPLETED/ST0005/results.md deleted file mode 100644 index 9b38db9..0000000 --- a/stp/prj/st/COMPLETED/ST0005/results.md +++ /dev/null @@ -1,14 +0,0 @@ -# Results - ST0005: Initialization Command - -## Results - -The initialization command was successfully implemented with all planned functionality. The system: - -- Creates a complete project structure with all necessary directories -- Instantiates templates with project-specific information -- Creates a configuration file for use by other STP commands -- Provides a smooth user experience with appropriate feedback -- Ensures consistency in project structure across different projects - -The implementation significantly reduces the time required to set up a new project and ensures that all projects follow a consistent structure. 
- diff --git a/stp/prj/st/COMPLETED/ST0006/results.md b/stp/prj/st/COMPLETED/ST0006/results.md deleted file mode 100644 index d95fbc7..0000000 --- a/stp/prj/st/COMPLETED/ST0006/results.md +++ /dev/null @@ -1,14 +0,0 @@ -# Results - ST0006: Help System - -## Results - -The help system was successfully implemented with all planned functionality. The system: - -- Provides clear, concise help for all STP commands -- Supports both general help and command-specific detailed help -- Dynamically discovers commands, making it extensible as new commands are added -- Maintains a consistent format across all help documentation -- Improves user experience by providing usage examples and clear instructions - -The implementation significantly improves usability by making it easy for users to learn how to use the system and discover available functionality. - diff --git a/stp/prj/st/COMPLETED/ST0007/results.md b/stp/prj/st/COMPLETED/ST0007/results.md deleted file mode 100644 index dc8b4dc..0000000 --- a/stp/prj/st/COMPLETED/ST0007/results.md +++ /dev/null @@ -1,15 +0,0 @@ -# Results - ST0007: User Documentation - -## Results - -The user documentation was successfully created with all planned components. The documentation: - -- Provides clear guidance for new users to get started -- Offers detailed reference information for advanced users -- Includes examples and best practices for common scenarios -- Is organized logically for easy navigation -- Balances brevity with completeness -- Accommodates different learning styles and needs - -The implementation helps users adopt and effectively use the STP system, reducing the learning curve and improving productivity. 
- diff --git a/stp/prj/st/COMPLETED/ST0008/results.md b/stp/prj/st/COMPLETED/ST0008/results.md deleted file mode 100644 index 113bb75..0000000 --- a/stp/prj/st/COMPLETED/ST0008/results.md +++ /dev/null @@ -1,15 +0,0 @@ -# Results - ST0008: LLM Integration - -## Results - -The LLM integration was successfully implemented with all planned components. The integration: - -- Provides consistent context to LLMs for better generation results -- Streamlines document creation and maintenance tasks -- Offers guidance to users on effective LLM collaboration -- Maintains human oversight and quality control -- Reduces time spent on repetitive documentation tasks -- Improves document consistency and completeness - -The implementation significantly enhances the productivity of STP users by providing AI assistance while maintaining appropriate human control over the final output. - diff --git a/stp/prj/st/COMPLETED/ST0009/results.md b/stp/prj/st/COMPLETED/ST0009/results.md deleted file mode 100644 index 87f28d4..0000000 --- a/stp/prj/st/COMPLETED/ST0009/results.md +++ /dev/null @@ -1,15 +0,0 @@ -# Results - ST0009: Process Refinement - -## Results - -The process refinement effort significantly improved the STP system, resulting in: - -- More intuitive and efficient user workflows -- Reduced friction in common tasks -- Better error handling and user feedback -- Enhanced cross-platform compatibility -- More comprehensive and accessible documentation -- Improved LLM assistance through better prompts and context - -The refinements have made the system more user-friendly and effective, addressing the key pain points identified through user feedback while maintaining the core functionality and purpose of STP. 
- diff --git a/stp/prj/st/COMPLETED/ST0012/results.md b/stp/prj/st/COMPLETED/ST0012/results.md deleted file mode 100644 index 3b33927..0000000 --- a/stp/prj/st/COMPLETED/ST0012/results.md +++ /dev/null @@ -1,18 +0,0 @@ -# Results - ST0012: Document Sync Command - -## Results - -The Document Sync Command feature was successfully implemented, providing the following benefits: - -1. **Automated Consistency**: The steel_threads.md document is now automatically kept in sync with the individual steel thread files, eliminating manual updates and ensuring consistency. - -2. **Configurable Output**: The command supports customizable table widths to ensure proper formatting for both terminal output and document integration. - -3. **Metadata Support**: The implementation handles both YAML frontmatter and document body metadata, providing flexibility in how steel thread information is stored. - -4. **Non-Destructive Updates**: The section marker approach allows for updating specific parts of the document while preserving manually edited sections. - -5. **Comprehensive Tests**: Added test cases ensure the feature works correctly and will continue to function after future changes. - -The sync command provides a significant improvement in the maintainability of the STP documentation system by automating what was previously a manual process. This reduces the likelihood of documentation inconsistencies and saves time when managing steel threads. 
- diff --git a/stp/prj/st/COMPLETED/ST0014/results.md b/stp/prj/st/COMPLETED/ST0014/results.md deleted file mode 100644 index 3c0d4c5..0000000 --- a/stp/prj/st/COMPLETED/ST0014/results.md +++ /dev/null @@ -1,5 +0,0 @@ -# Results - ST0014: Directory Structure for Steel Threads - -## Results -[To be completed after implementation] - diff --git a/stp/prj/st/NOT-STARTED/ST0011/results.md b/stp/prj/st/NOT-STARTED/ST0011/results.md deleted file mode 100644 index 22896a4..0000000 --- a/stp/prj/st/NOT-STARTED/ST0011/results.md +++ /dev/null @@ -1,69 +0,0 @@ -# Results - ST0011: Test Suite Implementation - -## Results - -### Current Status (Partial Implementation) - -The test suite has been successfully implemented with the following components: - -1. **Directory Structure**: - - Created an organized test directory structure with separate sections for components - - Implemented a fixtures directory for test data - - Set up a lib directory for shared testing functionality - -2. **Test Helper Library**: - - Created a comprehensive test_helper.bash with common functions - - Implemented isolation between tests using temporary directories - - Added custom assertions for file system verification - - Created mock object functionality for testing environmental dependencies - -3. **Component Tests**: - - Implemented bootstrap_test.bats with 11 individual tests for the bootstrap script - - Implemented init_test.bats with 8 individual tests for the init command - - Implemented st_test.bats with 10 individual tests for the steel thread commands - - Implemented help_test.bats with 6 individual tests for the help command - - Implemented main_test.bats with 6 individual tests for the main stp script - -4. **Test Runner**: - - Created run_tests.sh to execute all tests or specific test suites - - Added colorized output for better readability - - Added error reporting and success messages - - Fixed bug to exclude library test files from test runs - -5. 
**Test Environment Setup**: - - Created setup_test_env.sh to install test dependencies - - Added support for library installation - - Created functionality for adapting to different installation configurations - - Added .gitignore file to exclude test libraries from source control - -### Remaining Work - -The following work is still needed to complete this steel thread: - -1. **Continuous Integration**: - - Set up CI configuration for automated testing - - Create CI workflow definition - - Configure test reporting and notification - -2. **Additional Test Coverage**: - - Add tests for edge cases and error handling - - Create additional tests for LLM integration features - - Add performance tests - -3. **Documentation Updates**: - - Update the technical product design with test suite information - - Create user documentation for running and extending tests - - Document test patterns and best practices - -### Lessons Learned - -1. Bash script testing requires careful isolation of the test environment -2. Mocking and simulation are essential for testing filesystem operations -3. A comprehensive test helper library significantly reduces test code duplication -4. Temporary directory management is critical for clean test runs -5. Support for different environments requires flexible path handling -6. Testing interactive scripts requires special handling, like using the `expect` utility -7. String pattern matching in tests needs escaping for special characters (like asterisks) -8. Exclude test library tests from your test runs to avoid conflicts -9. 
A well-structured .gitignore file helps keep test dependencies out of source control - diff --git a/stp/prj/st/NOT-STARTED/ST0015/results.md b/stp/prj/st/NOT-STARTED/ST0015/results.md deleted file mode 100644 index 837ca13..0000000 --- a/stp/prj/st/NOT-STARTED/ST0015/results.md +++ /dev/null @@ -1,21 +0,0 @@ -# Results - ST0015: Enhanced Steel Thread Templates and File Types - -## Results - -[Summary of results after completion, including outcomes and any follow-up work needed] - -## Outcomes - -[Specific outcomes achieved] - -## Metrics - -[Any relevant metrics or measurements] - -## Lessons Learned - -[Key lessons learned during implementation] - -## Follow-up Work - -[Any follow-up work identified] \ No newline at end of file diff --git a/stp/prj/st/ST0010/results.md b/stp/prj/st/ST0010/results.md deleted file mode 100644 index adea7f0..0000000 --- a/stp/prj/st/ST0010/results.md +++ /dev/null @@ -1,6 +0,0 @@ -# Results - ST0010: Anthropic MCP Integration - -## Results - -[Summary of results after completion, including outcomes, lessons learned, and any follow-up work needed] - diff --git a/stp/prj/st/steel_threads.md b/stp/prj/st/steel_threads.md deleted file mode 100644 index 4ace6be..0000000 --- a/stp/prj/st/steel_threads.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -verblock: "20 Mar 2025:v0.1: Claude - Updated with new directory structure" -stp_version: 1.2.0 ---- -# Steel Threads - -This document serves as an index of all steel threads in the Steel Thread Process (STP) system. A steel thread represents a self-contained unit of work that focuses on implementing a specific piece of functionality. 
- -## Index - -<!-- BEGIN: STEEL_THREAD_INDEX --> -ID | Title | Status | Created | Completed ------------|------------------------------------------------------------|------------------|------------|----------- -ST0014 | Directory Structure for Steel Threads | In Progress | 2025-03-20 | -ST0013 | STP Blog Post Series | Completed | 2025-03-11 | 2025-07-08 -ST0012 | Document Sync Command | Completed | 2025-03-07 | 2025-03-07 -ST0011 | Test Suite Implementation | Not Started | 2025-06-03 | 2025-06-03 -ST0010 | Anthropic MCP Integration | On Hold | 2025-06-03 | -ST0009 | Process Refinement | Completed | 2025-03-06 | 2025-06-03 -ST0008 | LLM Integration | Completed | 2025-03-06 | 2025-06-03 -ST0007 | User Documentation | Completed | 2025-03-06 | 2025-06-03 -ST0006 | Help System | Completed | 2025-03-06 | 2025-06-03 -ST0005 | Initialization Command | Completed | 2025-03-06 | 2025-06-03 -ST0004 | Steel Thread Commands | Completed | 2025-03-06 | 2025-06-03 -ST0003 | Template System | Completed | 2025-03-06 | 2025-06-03 -ST0002 | Core Script Framework | Completed | 2025-03-06 | 2025-06-03 -ST0001 | Directory Structure | Completed | 2025-03-06 | 2025-06-03 -<!-- END: STEEL_THREAD_INDEX --> - -## Steel Thread Status Definitions - -<!-- BEGIN: STATUS_DEFINITIONS --> -- **Not Started**: Steel thread has been created but work has not begun (stp/prj/st/NOT-STARTED/) -- **In Progress**: Work is actively being done on this steel thread (stp/prj/st/) -- **Completed**: All tasks have been completed and the steel thread is finished (stp/prj/st/COMPLETED) -- **On Hold**: Work has been temporarily paused (stp/prj/st) -- **Cancelled**: The steel thread has been cancelled and will not be completed (stp/prj/st/CANCELLED) -<!-- END: STATUS_DEFINITIONS --> - -## Context for LLM - -This document provides an overview of all steel threads in the STP project. It helps track the progress of individual pieces of work and serves as a navigation aid for finding specific steel thread documents. 
- -### How to use this document - -<!-- BEGIN: USAGE_INSTRUCTIONS --> -1. Update the index when creating new steel threads -2. Update the status of steel threads as they progress -3. Add completion dates when steel threads are finished -4. Use this document to quickly locate specific steel thread documents -<!-- END: USAGE_INSTRUCTIONS --> - -The detailed information for each steel thread is contained in its individual document (e.g., ST0001.md). -| ST0014 | Directory Structure for Steel Threads | Completed | | 2025-07-09 | -| ST0015 | Enhanced Steel Thread Templates and File Types | Not Started | 2025-07-09 | | diff --git a/stp/prj/wip.md b/stp/prj/wip.md deleted file mode 100644 index bc5a08a..0000000 --- a/stp/prj/wip.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 ---- -# Work In Progress - -This file serves as a placeholder for kicking off new sessions. - -See the following files for detailed information about the project: - -- [Technical Product Design](../eng/tpd/technical_product_design.md) -- [Steel Threads Overview](st/steel_threads.md) - -Read CLAUDE.md then wait for instruction. diff --git a/stp/tests/.gitignore b/stp/tests/.gitignore deleted file mode 100644 index 8e47bed..0000000 --- a/stp/tests/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -# Bats libraries (should be installed by setup_test_env.sh) -lib/bats-* - -# Temporary test files -tmp/ - -# Editor backups -*.bak -*.tmp \ No newline at end of file diff --git a/stp/tests/README.md b/stp/tests/README.md deleted file mode 100644 index 2c961fe..0000000 --- a/stp/tests/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# STP Test Suite - -This directory contains automated tests for the Steel Thread Process (STP) utilities. - -## Overview - -The STP test suite uses [Bats](https://github.com/bats-core/bats-core) (Bash Automated Testing System) to test the functionality of STP scripts and commands. 
- -## Directory Structure - -- `/bootstrap`: Tests for the bootstrap script -- `/init`: Tests for the initialization command -- `/st`: Tests for steel thread management -- `/help`: Tests for the help command -- `/main`: Tests for the main stp script -- `/fixtures`: Test fixtures and mock environments -- `/lib`: Test helpers and utility functions - -## Setup - -### Automated Setup - -The easiest way to set up the test environment is to use the provided setup script: - -```bash -./setup_test_env.sh -``` - -This will install Bats and the required libraries. On macOS with Homebrew, it will use `brew install bats-core` for convenience. - -### Manual Setup - -1. Install Bats: - - **Using Homebrew (macOS):** - ```bash - brew install bats-core - ``` - - **From Source:** - ```bash - git clone https://github.com/bats-core/bats-core.git - cd bats-core - ./install.sh /usr/local - ``` - -2. Install Bats libraries: - ```bash - mkdir -p stp/tests/lib - git clone https://github.com/bats-core/bats-support.git stp/tests/lib/bats-support - git clone https://github.com/bats-core/bats-assert.git stp/tests/lib/bats-assert - git clone https://github.com/bats-core/bats-file.git stp/tests/lib/bats-file - ``` - -## Running Tests - -The test suite includes a helper script that makes it easy to run tests: - -```bash -# Navigate to the tests directory -cd /path/to/STP/stp/tests/ - -# Run the test script (runs all tests) -./run_tests.sh - -# Run only specific tests (e.g., bootstrap tests) -./run_tests.sh bootstrap - -# Run a specific test file -./run_tests.sh bootstrap/bootstrap_test.bats -``` - -Alternatively, you can run the tests directly with Bats: - -```bash -# Run all tests from the project root -bats stp/tests/**/*.bats - -# Run only bootstrap tests -bats stp/tests/bootstrap/*.bats -``` - -## Writing Tests - -Each test file should follow this pattern: - -```bash -#!/usr/bin/env bats - -load '../lib/test_helper' - -setup() { - # Set up test environment -} - -teardown() { - # Clean up 
after test -} - -@test "Test description" { - # Test code - run some_command - assert_success - assert_output "Expected output" -} -``` - -## Test Fixtures - -Test fixtures are stored in the `fixtures` directory and provide known states -and environments for tests to run against. - -## Test Helper Functions - -Common test helper functions are defined in the `lib/test_helper.bash` file. - -## Interactive Script Testing - -For testing scripts that require user input (like confirming operations): - -1. Use the `expect` utility to automate interactive testing: - -```bash -#!/usr/bin/expect -f -set timeout 5 -spawn ./command_to_test arg1 arg2 -expect "Prompt message" -send "y\r" -expect eof -``` - -2. Create the expect script in your test's setup function and call it from your test case. - -## Tips for Reliable Testing - -1. **Special Characters**: When testing for strings with special characters like asterisks (`*`), - use the `-F` flag with grep: - - ```bash - run grep -F "**bold text**" file.md - ``` - -2. **Exclude Library Tests**: The test runner excludes tests in the `/lib/` directory to avoid - running tests from the Bats libraries themselves. - -3. **`.gitignore`**: The test directory includes a `.gitignore` file to exclude the Bats library - directories and temporary files from source control. 
\ No newline at end of file diff --git a/stp/tests/backlog/backlog_test.bats b/stp/tests/backlog/backlog_test.bats deleted file mode 100644 index 6fca6b6..0000000 --- a/stp/tests/backlog/backlog_test.bats +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_backlog wrapper - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/backlog-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the backlog scripts to the test directory - cp "${STP_BIN_DIR}/stp_backlog" "${TEST_TEMP_DIR}/" - cp "${STP_BIN_DIR}/stp_bl" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_backlog" - chmod +x "${TEST_TEMP_DIR}/stp_bl" - - # Set STP_HOME for the test - export STP_HOME="${TEST_TEMP_DIR}" - - # Create minimal directory structure - mkdir -p "stp/bin" - cp "${STP_BIN_DIR}/stp" "stp/bin/" - chmod +x "stp/bin/stp" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if backlog shows help -@test "backlog shows help with no arguments" { - run ./stp_backlog - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp backlog"* ]] - [[ "$output" == *"STP wrapper for Backlog.md"* ]] -} - -# Test bl alias -@test "bl alias works the same as backlog" { - run ./stp_bl --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp backlog"* ]] -} - -# Test init command -@test "backlog init configures for local use" { - # Mock the backlog command - create_mock_command "backlog" 0 "Backlog initialized" - - # Create a fake config file that init would create - mkdir -p backlog - cat > backlog/config.yml << EOF -project_name: "test" -remote_operations: true -default_status: "todo" -EOF - - run ./stp_backlog init - [ "$status" -eq 0 ] - [[ "$output" == *"Configuring backlog for STP integration"* ]] - [[ "$output" == *"Backlog configured for local STP 
use"* ]] -} - -# Test create command validation -@test "backlog create validates steel thread ID" { - run ./stp_backlog create INVALID "Test task" - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID format"* ]] -} - -# Test create command with valid ID -@test "backlog create works with valid steel thread ID" { - # Mock the backlog command - create_mock_command "backlog" 0 "Created task task-1" - - run ./stp_backlog create ST0014 "Test task" - [ "$status" -eq 0 ] -} - -# Test list command adds --plain -@test "backlog list automatically adds --plain flag" { - # Create a mock backlog that shows what arguments it received - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Arguments: $*" -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_backlog list - [ "$status" -eq 0 ] - [[ "$output" == *"Arguments: task list --plain"* ]] -} - -# Test list doesn't duplicate --plain -@test "backlog list doesn't duplicate --plain flag" { - # Create a mock backlog that shows what arguments it received - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Arguments: $*" -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_backlog list --plain - [ "$status" -eq 0 ] - [[ "$output" == *"Arguments: task list --plain"* ]] - # Make sure --plain doesn't appear twice - ! 
[[ "$output" == *"--plain --plain"* ]] -} - -# Test board command does NOT add --plain -@test "backlog board does NOT add --plain flag" { - # Create a mock backlog that shows what arguments it received - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Arguments: $*" -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_backlog board - [ "$status" -eq 0 ] - [[ "$output" == *"Arguments: board"* ]] - [[ "$output" != *"--plain"* ]] -} - -# Test pass-through of other commands -@test "backlog passes through other commands unchanged" { - # Create a mock backlog that shows what arguments it received - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Arguments: $*" -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_backlog config get projectName - [ "$status" -eq 0 ] - [[ "$output" == *"Arguments: config get projectName"* ]] -} \ No newline at end of file diff --git a/stp/tests/backlog/tasks/task-1 - Test-task.md b/stp/tests/backlog/tasks/task-1 - Test-task.md deleted file mode 100644 index fcd4e1a..0000000 --- a/stp/tests/backlog/tasks/task-1 - Test-task.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: task-1 -title: Test task -status: To Do -assignee: [] -created_date: '2025-07-08' -labels: [] -dependencies: [] ---- - -## Description diff --git a/stp/tests/bl/bl_test.bats b/stp/tests/bl/bl_test.bats deleted file mode 100644 index cd52278..0000000 --- a/stp/tests/bl/bl_test.bats +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp bl command (shorthand for stp backlog) - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/bl-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy required scripts - cp "${STP_BIN_DIR}/stp_backlog" 
"${TEST_TEMP_DIR}/" - cp "${STP_BIN_DIR}/stp_bl" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_backlog" - chmod +x "${TEST_TEMP_DIR}/stp_bl" - - # Create bin directory structure for STP_HOME - mkdir -p "${TEST_TEMP_DIR}/stp/bin" - cp "${STP_BIN_DIR}/stp" "${TEST_TEMP_DIR}/stp/bin/" - cp "${STP_BIN_DIR}/stp_backlog" "${TEST_TEMP_DIR}/stp/bin/" - cp "${STP_BIN_DIR}/stp_bl" "${TEST_TEMP_DIR}/stp/bin/" - chmod +x "${TEST_TEMP_DIR}/stp/bin/"* - - # Set STP_HOME for the test - export STP_HOME="${TEST_TEMP_DIR}" - - # Mock backlog command for most tests - mkdir -p "${TEST_TEMP_DIR}/bin" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test stp bl command works -@test "stp bl shows help" { - run ./stp_bl - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp backlog"* ]] - [[ "$output" == *"stp bl"* ]] -} - -# Test stp bl list command -@test "stp bl list adds --plain automatically" { - # Mock backlog to show arguments - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Backlog called with: $*" -if [[ "$*" == "task list --plain" ]]; then - echo "To Do:" - echo " task-1 - Test task" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl list - [ "$status" -eq 0 ] - [[ "$output" == *"Backlog called with: task list --plain"* ]] - [[ "$output" == *"task-1 - Test task"* ]] -} - -# Test stp bl create command -@test "stp bl create validates and creates task" { - # Mock backlog for task creation - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -if [[ "$1" == "task" && "$2" == "create" && "$3" == "ST0014 - Test task" ]]; then - echo "Created task task-1" - echo "File: backlog/tasks/task-1 - ST0014-Test-task.md" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl create ST0014 "Test task" - [ "$status" -eq 0 ] - [[ "$output" == 
*"Created task task-1"* ]] -} - -# Test stp bl create with invalid ID -@test "stp bl create rejects invalid steel thread ID" { - run ./stp_bl create INVALID "Test task" - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID format"* ]] -} - -# Test stp bl board command -@test "stp bl board passes through without --plain" { - # Mock backlog to show arguments - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Backlog called with: $*" -if [[ "$*" == "board" ]]; then - echo "Kanban board displayed" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl board - [ "$status" -eq 0 ] - [[ "$output" == *"Backlog called with: board"* ]] - [[ "$output" == *"Kanban board displayed"* ]] -} - -# Test stp bl init command -@test "stp bl init configures backlog for STP" { - # Mock backlog for init and config - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -if [[ "$1" == "init" ]]; then - mkdir -p backlog - echo "project_name: test" > backlog/config.yml - echo "Backlog initialized" - exit 0 -elif [[ "$1" == "config" && "$2" == "set" ]]; then - echo "Config set: $3 = $4" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl init - [ "$status" -eq 0 ] - [[ "$output" == *"Backlog initialized"* ]] - [[ "$output" == *"Configuring backlog for STP integration"* ]] - [[ "$output" == *"Backlog configured for local STP use"* ]] -} - -# Test pass-through of task subcommands -@test "stp bl task edit passes through correctly" { - # Mock backlog to show arguments - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Backlog called with: $*" -if [[ "$*" == "task edit task-5 --status Done" ]]; then - echo "Task updated" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl task edit task-5 --status Done - [ "$status" -eq 0 ] - [[ "$output" == *"Backlog called with: task edit task-5 --status Done"* ]] - [[ "$output" == *"Task updated"* ]] -} - -# Test 
that other commands are passed through -@test "stp bl passes through unknown commands" { - # Mock backlog to show arguments - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -echo "Backlog called with: $*" -exit 0 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - - run ./stp_bl decision create "Architecture choice" - [ "$status" -eq 0 ] - [[ "$output" == *"Backlog called with: decision create Architecture choice"* ]] -} - -# Test integration through main stp command -@test "stp bl works through main stp command" { - # Ensure help file exists - mkdir -p "stp/bin/.help" - echo "@short:" > "stp/bin/.help/bl.help.md" - echo "Shorthand for backlog" >> "stp/bin/.help/bl.help.md" - - run stp/bin/stp bl --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp backlog"* ]] -} \ No newline at end of file diff --git a/stp/tests/bootstrap/bootstrap_test.bats b/stp/tests/bootstrap/bootstrap_test.bats deleted file mode 100644 index 9d0c38e..0000000 --- a/stp/tests/bootstrap/bootstrap_test.bats +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env bats -# Tests for the bootstrap script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/bootstrap-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the bootstrap script to the test directory - cp "${STP_BIN_DIR}/bootstrap" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/bootstrap" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if bootstrap runs without error -@test "bootstrap executes without error" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - [ -n "$output" ] -} - -# Test if bootstrap creates the expected directory structure -@test "bootstrap creates the correct directory structure" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check main directories - 
assert_directory_exists "stp" - assert_directory_exists "stp/bin" - assert_directory_exists "stp/prj" - assert_directory_exists "stp/prj/st" - assert_directory_exists "stp/eng" - assert_directory_exists "stp/eng/tpd" - assert_directory_exists "stp/usr" - assert_directory_exists "stp/llm" - assert_directory_exists "stp/_templ" - assert_directory_exists "bin" -} - -# Test if bootstrap creates the expected template files -@test "bootstrap creates the correct template files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check template files - assert_file_exists "stp/_templ/prj/_wip.md" - assert_file_exists "stp/_templ/prj/_journal.md" - assert_file_exists "stp/_templ/prj/st/_steel_threads.md" - assert_file_exists "stp/_templ/prj/st/_ST####.md" - assert_file_exists "stp/_templ/eng/tpd/_technical_product_design.md" - assert_file_exists "stp/_templ/usr/_user_guide.md" - assert_file_exists "stp/_templ/llm/_llm_preamble.md" -} - -# Test if bootstrap creates the expected project files -@test "bootstrap creates the correct project files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check project files - assert_file_exists "stp/prj/wip.md" - assert_file_exists "stp/prj/journal.md" - assert_file_exists "stp/prj/st/steel_threads.md" - assert_file_exists "stp/prj/st/ST0001.md" - assert_file_exists "stp/prj/st/ST0002.md" -} - -# Test if bootstrap creates the expected engineering files -@test "bootstrap creates the correct engineering files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check engineering files - assert_file_exists "stp/eng/tpd/technical_product_design.md" - assert_file_exists "stp/eng/tpd/1_introduction.md" - assert_file_exists "stp/eng/tpd/2_requirements.md" - assert_file_exists "stp/eng/tpd/3_architecture.md" - assert_file_exists "stp/eng/tpd/4_detailed_design.md" - assert_file_exists "stp/eng/tpd/5_implementation_strategy.md" - assert_file_exists "stp/eng/tpd/6_deployment_and_operations.md" - assert_file_exists 
"stp/eng/tpd/7_technical_challenges_and_mitigations.md" - assert_file_exists "stp/eng/tpd/8_appendices.md" -} - -# Test if bootstrap creates the expected user documentation files -@test "bootstrap creates the correct user documentation files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check user documentation files - assert_file_exists "stp/usr/user_guide.md" - assert_file_exists "stp/usr/reference_guide.md" - assert_file_exists "stp/usr/deployment_guide.md" -} - -# Test if bootstrap creates the expected LLM files -@test "bootstrap creates the correct LLM files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check LLM files - assert_file_exists "stp/llm/llm_preamble.md" -} - -# Test if bootstrap creates files with the correct author information -@test "bootstrap uses the provided author name in files" { - local author="Custom Author" - run ./bootstrap "$author" - [ "$status" -eq 0 ] - - # Check if author is correctly set in a file - assert_file_contains "stp/prj/wip.md" "$author" -} - -# Test if bootstrap uses git config for author name when not provided -@test "bootstrap uses git config for author name when not provided" { - # Set up git environment - git init -q - git config --local user.name "Git User" - - run ./bootstrap - [ "$status" -eq 0 ] - - # Check if git user name is used - assert_file_contains "stp/prj/wip.md" "Git User" -} - -# Test if bootstrap creates script files -@test "bootstrap creates script files" { - run ./bootstrap "Test User" - [ "$status" -eq 0 ] - - # Check script files existence - assert_file_exists "bin/stp" - assert_file_exists "bin/stp_init" - assert_file_exists "bin/stp_st" - assert_file_exists "bin/stp_help" - assert_file_exists "bin/bootstrap" - - assert_file_exists "stp/bin/stp" - assert_file_exists "stp/bin/stp_init" - assert_file_exists "stp/bin/stp_st" - assert_file_exists "stp/bin/stp_help" - assert_file_exists "stp/bin/bootstrap" -} \ No newline at end of file diff --git 
a/stp/tests/help/help_test.bats b/stp/tests/help/help_test.bats deleted file mode 100644 index 9ff4985..0000000 --- a/stp/tests/help/help_test.bats +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_help script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/help-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the help script to the test directory - cp "${STP_BIN_DIR}/stp_help" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_help" - - # Create minimal STP_HOME structure with required directories - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/bin/.help" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/usr" - - # Create sample help files with proper format - cat > "${TEST_TEMP_DIR}/stp_home/stp/bin/.help/init.help.md" << EOF -# init command - -@short: -Initialize a new STP project - -@description: -Detailed description here -EOF - - cat > "${TEST_TEMP_DIR}/stp_home/stp/bin/.help/st.help.md" << EOF -# st command - -@short: -Manage steel threads - -@description: -Detailed description here -EOF - - # Create sample scripts - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_init" - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_st" - chmod +x "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_init" - chmod +x "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_st" - - # Set STP_HOME environment variable - export STP_HOME="${TEST_TEMP_DIR}/stp_home" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - unset STP_HOME - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if help requires STP_HOME to be set -@test "help requires STP_HOME environment variable" { - unset STP_HOME - run ./stp_help - [ "$status" -ne 0 ] - [[ "$output" == *"STP_HOME environment variable is not set"* ]] -} - -# Test if help displays general help when no command is specified -@test "help displays general help when no 
command is specified" { - run ./stp_help - [ "$status" -eq 0 ] - [[ "$output" == *"STP - Steel Thread Process"* ]] - [[ "$output" == *"Available commands:"* ]] - [[ "$output" == *"init"* ]] - [[ "$output" == *"st"* ]] -} - -# Test if help displays command-specific help when a command is specified -@test "help displays command-specific help when a command is specified" { - run ./stp_help init - [ "$status" -eq 0 ] - [[ "$output" == *"init command"* ]] -} - -# Test if help shows short descriptions from help files -@test "help shows short descriptions from help files" { - run ./stp_help - [ "$status" -eq 0 ] - # Use regex pattern to match the output with flexible whitespace - [[ "$output" =~ init[[:space:]]+Initialize[[:space:]]a[[:space:]]new[[:space:]]STP[[:space:]]project ]] - [[ "$output" =~ st[[:space:]]+Manage[[:space:]]steel[[:space:]]threads ]] -} - -# Test if help handles unknown commands correctly -@test "help handles unknown commands correctly" { - run ./stp_help unknown_command - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown command 'unknown_command'"* ]] -} - -# Test if help handles commands with no help files correctly -@test "help handles commands with no help files correctly" { - # Create a command with no help file - touch "${STP_HOME}/stp/bin/stp_nohelp" - chmod +x "${STP_HOME}/stp/bin/stp_nohelp" - - run ./stp_help nohelp - [ "$status" -eq 0 ] - [[ "$output" == *"No help available for command 'nohelp'"* ]] -} \ No newline at end of file diff --git a/stp/tests/init/init_test.bats b/stp/tests/init/init_test.bats deleted file mode 100644 index 36d5aa6..0000000 --- a/stp/tests/init/init_test.bats +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_init script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/init-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the init script to the test 
directory - cp "${STP_BIN_DIR}/stp_init" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_init" - - # Create a minimal STP_HOME structure with required directories - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/_templ/prj/st" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/_templ/eng/tpd" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/_templ/usr" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/_templ/llm" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/bin" - - # Create minimal template files - create_template_files - - # Set STP_HOME environment variable - export STP_HOME="${TEST_TEMP_DIR}/stp_home" - - # Create expect script for running stp_init non-interactively - cat > "${TEST_TEMP_DIR}/run_init.exp" << 'EOF' -#!/usr/bin/expect -f -set timeout 5 -set project_name [lindex $argv 0] -set target_dir [lindex $argv 1] - -# Get the command to run -if {$target_dir eq ""} { - set cmd "./stp_init \"$project_name\"" -} else { - set cmd "./stp_init \"$project_name\" \"$target_dir\"" -} - -# Execute the command -spawn {*}$cmd - -# Handle any "directory not empty" prompts -expect { - "Press Enter to continue or Ctrl+C to cancel" { - send "\r" - exp_continue - } - timeout { - exit 1 - } - eof -} -EOF - - chmod +x "${TEST_TEMP_DIR}/run_init.exp" - - # Skip tests if expect is not available - if ! 
command -v expect &> /dev/null; then - skip "expect command is not available" - fi -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - unset STP_HOME - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Helper function to create minimal template files for testing -create_template_files() { - # Create template files - echo "Template WIP" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/prj/_wip.md" - echo "Template Journal" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/prj/_journal.md" - echo "Template Steel Threads" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/prj/st/_steel_threads.md" - echo "Template ST####" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/prj/st/_ST####.md" - echo "Template TPD" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/eng/tpd/_technical_product_design.md" - echo "Template User Guide" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/usr/_user_guide.md" - echo "Template Reference Guide" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/usr/_reference_guide.md" - echo "Template Deployment Guide" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/usr/_deployment_guide.md" - echo "Template LLM Preamble" > "${TEST_TEMP_DIR}/stp_home/stp/_templ/llm/_llm_preamble.md" - - # Create mock scripts - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp" - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_init" - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_st" - touch "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_help" - chmod +x "${TEST_TEMP_DIR}/stp_home/stp/bin/"* -} - -# Test if init requires STP_HOME to be set -@test "init requires STP_HOME environment variable" { - unset STP_HOME - run ./stp_init "Test Project" - [ "$status" -ne 0 ] - [[ "$output" == *"STP_HOME environment variable is not set"* ]] -} - -# Test if init requires project name argument -@test "init requires project name argument" { - run ./stp_init - [ "$status" -ne 0 ] - [[ "$output" == *"Project name is required"* ]] -} - -# Test if init creates a project in the current directory by default -@test "init 
creates a project in the current directory by default" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/test-dir" - cd "${TEST_TEMP_DIR}/test-dir" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Run with expect to handle interactive prompts - run ./run_init.exp "Test Project" - [ "$status" -eq 0 ] - - # Check if project was created in current directory with default directories - assert_directory_exists "stp" - assert_directory_exists "stp/.config" - assert_directory_exists "stp/prj" - assert_directory_exists "stp/eng" - assert_directory_exists "stp/usr" - assert_directory_exists "stp/llm" - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} - -# Test if init creates a project in a specified directory -@test "init creates a project in a specified directory" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/specified-dir-test" - cd "${TEST_TEMP_DIR}/specified-dir-test" - - # Create target directory - mkdir -p "${TEST_TEMP_DIR}/specified-dir-test/target-dir" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Run with expect to handle interactive prompts - run ./run_init.exp "Test Project" "target-dir" - [ "$status" -eq 0 ] - - # Check if project was created in the specified directory with default directories - assert_directory_exists "target-dir/stp" - assert_directory_exists "target-dir/stp/.config" - assert_directory_exists "target-dir/stp/prj" - assert_directory_exists "target-dir/stp/eng" - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} - -# Test if init creates the configuration file -@test "init creates the configuration file" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/config-test" - cd "${TEST_TEMP_DIR}/config-test" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - 
cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Run with expect to handle interactive prompts - run ./run_init.exp "Test Project" - [ "$status" -eq 0 ] - - # Check if configuration file was created - assert_file_exists "stp/.config/config" - assert_file_contains "stp/.config/config" "PROJECT_NAME=\"Test Project\"" - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} - -# Test if init creates project files from templates -@test "init creates project files from templates" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/template-test" - cd "${TEST_TEMP_DIR}/template-test" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Run with expect to handle interactive prompts - run ./run_init.exp "Test Project" - [ "$status" -eq 0 ] - - # Check if files were created from templates - assert_file_exists "stp/prj/wip.md" - assert_file_exists "stp/prj/st/steel_threads.md" - assert_file_exists "stp/eng/tpd/technical_product_design.md" - assert_file_exists "stp/usr/user_guide.md" - assert_file_exists "stp/usr/reference_guide.md" - assert_file_exists "stp/usr/deployment_guide.md" - assert_file_exists "stp/llm/llm_preamble.md" - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} - -# Test if init copies scripts and makes them executable when --all is specified -@test "init copies scripts and makes them executable" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/scripts-test" - cd "${TEST_TEMP_DIR}/scripts-test" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Create a modified expect script that uses the --all flag - cat > "./run_init_all.exp" << 'EOF' -#!/usr/bin/expect -f -set timeout 5 -set project_name [lindex $argv 0] -set target_dir [lindex $argv 1] - -# Get the command to run -if 
{$target_dir eq ""} { - set cmd "./stp_init --all \"$project_name\"" -} else { - set cmd "./stp_init --all \"$project_name\" \"$target_dir\"" -} - -# Execute the command -spawn {*}$cmd - -# Handle any "directory not empty" prompts -expect { - "Press Enter to continue or Ctrl+C to cancel" { - send "\r" - exp_continue - } - timeout { - exit 1 - } - eof -} -EOF - chmod +x ./run_init_all.exp - - # Run with expect to handle interactive prompts - run ./run_init_all.exp "Test Project" - [ "$status" -eq 0 ] - - # Check if scripts were copied and are executable - assert_file_exists "stp/bin/stp" - assert_file_exists "stp/bin/stp_init" - assert_file_exists "stp/bin/stp_st" - assert_file_exists "stp/bin/stp_help" - - [ -x "stp/bin/stp" ] - [ -x "stp/bin/stp_init" ] - [ -x "stp/bin/stp_st" ] - [ -x "stp/bin/stp_help" ] - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} - -# Test if init handles non-empty target directory -@test "init warns about non-empty target directory but continues" { - mkdir -p "${TEST_TEMP_DIR}/non-empty-dir" - touch "${TEST_TEMP_DIR}/non-empty-dir/existing-file.txt" - - # Use expect to handle interactive prompt - cat > "${TEST_TEMP_DIR}/expect-script" << EOF -#!/usr/bin/expect -f -set timeout 5 -spawn ./stp_init "Test Project" "${TEST_TEMP_DIR}/non-empty-dir" -expect "Warning: Target directory is not empty" -send "\r" -expect eof -EOF - - chmod +x "${TEST_TEMP_DIR}/expect-script" - - # Skip this test if expect is not available - if ! 
command -v expect &> /dev/null; then - skip "expect command is not available" - fi - - run "${TEST_TEMP_DIR}/expect-script" - - # Check if project was created despite directory not being empty - assert_directory_exists "${TEST_TEMP_DIR}/non-empty-dir/stp" - assert_file_exists "${TEST_TEMP_DIR}/non-empty-dir/existing-file.txt" -} - -# Test if init creates local configuration for STP -@test "init creates local configuration for STP" { - # Create a clean test directory - mkdir -p "${TEST_TEMP_DIR}/config-local-test" - cd "${TEST_TEMP_DIR}/config-local-test" - - # Copy necessary files - cp "${TEST_TEMP_DIR}/stp_init" ./ - cp "${TEST_TEMP_DIR}/run_init.exp" ./ - chmod +x ./stp_init ./run_init.exp - - # Run with expect to handle interactive prompts - run ./run_init.exp "Test Project" - [ "$status" -eq 0 ] - - # Check if local configuration was created - assert_file_exists "stp/.config/stp_config.sh" - assert_file_contains "stp/.config/stp_config.sh" "export STP_PROJECT=\"Test Project\"" - - # Return to the original test directory - cd "${TEST_TEMP_DIR}" -} \ No newline at end of file diff --git a/stp/tests/integration/README.md b/stp/tests/integration/README.md deleted file mode 100644 index 7d76910..0000000 --- a/stp/tests/integration/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# STP-Backlog Integration Tests - -This directory contains integration tests that verify STP and Backlog.md work together correctly. - -## Running the Tests - -```bash -# Run all integration tests -bats integration/stp_backlog_integration_test.bats - -# Run a specific test -bats integration/stp_backlog_integration_test.bats --filter "stp bl create" -``` - -## What These Tests Verify - -### Core Integration Points - -1. **Task Creation** - Tasks created through `stp bl create` are properly stored in Backlog format -2. **Task Listing** - Both `stp bl list` and `stp task list` correctly display tasks -3. **Task Naming** - Tasks follow the `task-<number>` naming convention required by Backlog.md -4. 
**Status Sync** - Task completion status properly syncs with steel thread status -5. **YAML Structure** - Created tasks have correct YAML frontmatter -6. **Special Characters** - Task titles with quotes and special characters are handled correctly -7. **Git Error Prevention** - The STP wrapper successfully prevents git-related errors -8. **Task Counting** - Task counts are accurate across different commands -9. **Error Handling** - Invalid steel thread IDs are properly rejected - -### Known Limitations - -1. **Task ID Format** - Backlog.md requires `task-<number>` format and cannot be customized -2. **Steel Thread Validation** - `stp bl create` doesn't validate if steel thread exists (by design) -3. **Migration Specifics** - `stp migrate` requires a specific "## Tasks" section format -4. **Browser Launch** - `stp bl board` opens a browser which can't be fully tested in CI - -## Test Results Summary - -All 13 tests pass consistently, verifying the core integration between STP and Backlog.md works correctly. - -## Key Findings - -- The integration works well for the core use cases -- Task naming must follow Backlog's conventions (`task-<number>`) -- The STP wrapper successfully prevents git-related errors -- Status synchronization between tasks and steel threads functions correctly -- Special characters in task titles are handled properly - -## Future Improvements - -1. Add tests for draft task functionality -2. Test concurrent task creation -3. Verify task deletion and archiving -4. Test edge cases like very long task titles -5. 
Add performance tests for large numbers of tasks \ No newline at end of file diff --git a/stp/tests/integration/stp_backlog_integration_test.bats b/stp/tests/integration/stp_backlog_integration_test.bats deleted file mode 100755 index 3ac4d5a..0000000 --- a/stp/tests/integration/stp_backlog_integration_test.bats +++ /dev/null @@ -1,322 +0,0 @@ -#!/usr/bin/env bats -# Integration tests for STP and Backlog.md -# Tests the integration between STP commands and Backlog functionality - -load ../lib/test_helper.bash - -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/stp-backlog-integration-test-XXXXXX")" - cd "$TEST_TEMP_DIR" - - # Initialize STP structure - mkdir -p stp/{bin,prj/st} - mkdir -p backlog/tasks - - # Copy necessary STP scripts - cp "${STP_BIN_DIR}/stp" stp/bin/ - cp "${STP_BIN_DIR}/stp_st" stp/bin/ - cp "${STP_BIN_DIR}/stp_task" stp/bin/ - cp "${STP_BIN_DIR}/stp_status" stp/bin/ - cp "${STP_BIN_DIR}/stp_backlog" stp/bin/ - cp "${STP_BIN_DIR}/stp_bl" stp/bin/ - cp "${STP_BIN_DIR}/stp_migrate" stp/bin/ - chmod +x stp/bin/* - - # Set up environment - export STP_HOME="$TEST_TEMP_DIR" - export PATH="$TEST_TEMP_DIR/stp/bin:$PATH" - - # Create a test steel thread - cat > "stp/prj/st/ST0099.md" << 'EOF' ---- -status: Not Started -created: 20250101 ---- -# ST0099: Test Integration Thread - -## Objective -Test the integration between STP and Backlog - -## Context -This is a test steel thread for integration testing -EOF - - # Initialize backlog config - cat > "backlog/config.yml" << 'EOF' -project_name: "test-project" -default_status: "To Do" -statuses: ["To Do", "In Progress", "Done"] -labels: [] -milestones: [] -date_format: yyyy-mm-dd -max_column_width: 20 -backlog_directory: "backlog" -auto_open_browser: false -default_port: 6420 -remote_operations: false -auto_commit: false -EOF -} - -teardown() { - cd "$BATS_TEST_TMPDIR" - rm -rf "$TEST_TEMP_DIR" -} - -# Test 1: Verify backlog is available -@test "integration: 
backlog command is available" { - run command -v backlog - [ "$status" -eq 0 ] - [[ -n "$output" ]] -} - -# Test 2: Create task through STP wrapper -@test "integration: stp bl create creates backlog task" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - run stp bl create ST0099 "Integration test task" - [ "$status" -eq 0 ] - [[ "$output" == *"Created task"* ]] - - # Verify task file was created - run ls backlog/tasks/ - [ "$status" -eq 0 ] - [[ "$output" == *"task-"* ]] - [[ "$output" == *"ST0099"* ]] -} - -# Test 3: List tasks through STP wrapper -@test "integration: stp bl list shows created tasks" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create a task first - stp bl create ST0099 "Test task for listing" >/dev/null 2>&1 - - run stp bl list - [ "$status" -eq 0 ] - [[ "$output" == *"ST0099"* ]] - [[ "$output" == *"Test task for listing"* ]] -} - -# Test 4: Task list command shows backlog tasks -@test "integration: stp task list shows tasks for steel thread" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create multiple tasks - stp bl create ST0099 "First task" >/dev/null 2>&1 - stp bl create ST0099 "Second task" >/dev/null 2>&1 - - run stp task list ST0099 - [ "$status" -eq 0 ] - [[ "$output" == *"Tasks for ST0099:"* ]] - [[ "$output" == *"First task"* ]] - [[ "$output" == *"Second task"* ]] - [[ "$output" == *"[todo]"* ]] -} - -# Test 5: Status synchronization -@test "integration: stp status show reflects task completion" { - # Skip if backlog is not installed - if ! 
command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create tasks - stp bl create ST0099 "Task one" >/dev/null 2>&1 - stp bl create ST0099 "Task two" >/dev/null 2>&1 - - run stp status show ST0099 - [ "$status" -eq 0 ] - [[ "$output" == *"Steel Thread: ST0099"* ]] - [[ "$output" == *"Current Status: Not Started"* ]] - [[ "$output" == *"Total Tasks: 2"* ]] - [[ "$output" == *"Todo: 2"* ]] -} - -# Test 6: Task naming convention -@test "integration: task files follow expected naming pattern" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create a task - output=$(stp bl create ST0099 "Naming test task" 2>&1) - - # Extract task ID from output - task_id=$(echo "$output" | grep -oE "task-[0-9]+" | head -1) - - # Verify file exists with correct pattern - run ls "backlog/tasks/${task_id} - ST0099-Naming-test-task.md" - [ "$status" -eq 0 ] -} - -# Test 7: Task content structure -@test "integration: created task has correct YAML structure" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create a task - stp bl create ST0099 "Structure test" >/dev/null 2>&1 - - # Find the created task file - task_file=$(ls backlog/tasks/*ST0099*Structure* | head -1) - - # Check YAML frontmatter - run grep "^id: task-" "$task_file" - [ "$status" -eq 0 ] - - run grep "^title: ST0099 - Structure test" "$task_file" - [ "$status" -eq 0 ] - - run grep "^status: To Do" "$task_file" - [ "$status" -eq 0 ] -} - -# Test 8: STP wrapper prevents git errors -@test "integration: stp bl wrapper prevents git fetch errors" { - # Skip if backlog is not installed - if ! 
command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # The main value of the STP wrapper is preventing git errors - # by automatically adding --plain to commands - - # Create a task - stp bl create ST0099 "Git error prevention test" >/dev/null 2>&1 - - # These commands should work without git fetch errors - run stp bl list - [ "$status" -eq 0 ] - # Should not contain git error messages - [[ "$output" != *"fatal:"* ]] - [[ "$output" != *"git"* ]] -} - - -# Test 9: Task status update -@test "integration: updating task status through backlog" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create a task - output=$(stp bl create ST0099 "Status update test" 2>&1) - task_id=$(echo "$output" | grep -oE "task-[0-9]+" | head -1) - - # Update status - run stp bl task edit "$task_id" --status "In Progress" - [ "$status" -eq 0 ] - - # Verify status changed - run stp task list ST0099 - [ "$status" -eq 0 ] - [[ "$output" == *"[in-progress]"* || "$output" == *"[in_progress]"* ]] -} - -# Test 10: Integration with status sync -@test "integration: status sync updates steel thread based on tasks" { - # Skip if backlog is not installed - if ! 
command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create tasks and mark some as done - output1=$(stp bl create ST0099 "Task 1" 2>&1) - output2=$(stp bl create ST0099 "Task 2" 2>&1) - - task1_id=$(echo "$output1" | grep -oE "task-[0-9]+" | head -1) - task2_id=$(echo "$output2" | grep -oE "task-[0-9]+" | head -1) - - # Mark one task as done - stp bl task edit "$task1_id" --status Done >/dev/null 2>&1 - - # Check status before sync - run grep "^status:" "stp/prj/st/ST0099.md" - [ "$status" -eq 0 ] - [[ "$output" == *"Not Started"* ]] - - # Sync status - run stp status sync ST0099 - [ "$status" -eq 0 ] - - # Check status after sync - run grep "^status:" "stp/prj/st/ST0099.md" - [ "$status" -eq 0 ] - [[ "$output" == *"In Progress"* ]] -} - -# Test 11: Error handling for invalid operations -@test "integration: proper error messages for invalid operations" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Creating task for non-existent steel thread succeeds (backlog doesn't validate existence) - # But stp task commands should handle invalid formats - run stp bl create ST9999 "Valid format task" - [ "$status" -eq 0 ] # This actually succeeds - - # Try to list tasks for invalid ID - run stp task list INVALID - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID format"* ]] -} - -# Test 12: Task count accuracy -@test "integration: task counts are accurate across commands" { - # Skip if backlog is not installed - if ! 
command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create exactly 3 tasks - stp bl create ST0099 "Count test 1" >/dev/null 2>&1 - stp bl create ST0099 "Count test 2" >/dev/null 2>&1 - stp bl create ST0099 "Count test 3" >/dev/null 2>&1 - - # Check count via status - run stp status show ST0099 - [ "$status" -eq 0 ] - [[ "$output" == *"Total Tasks: 3"* ]] - - # Check count via task list - run stp task list ST0099 - [ "$status" -eq 0 ] - task_count=$(echo "$output" | grep -c "task-[0-9]") - [ "$task_count" -eq 3 ] -} - -# Test 13: Unicode and special character handling -@test "integration: handles special characters in task titles" { - # Skip if backlog is not installed - if ! command -v backlog &> /dev/null; then - skip "Backlog.md not installed" - fi - - # Create task with special characters - run stp bl create ST0099 "Task with 'quotes' & special chars!" - [ "$status" -eq 0 ] - - # Verify it appears correctly in list - run stp task list ST0099 - [ "$status" -eq 0 ] - [[ "$output" == *"quotes"* ]] - [[ "$output" == *"special chars"* ]] -} - diff --git a/stp/tests/lib/test_helper.bash b/stp/tests/lib/test_helper.bash deleted file mode 100644 index 125e37f..0000000 --- a/stp/tests/lib/test_helper.bash +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash -# Test helper functions and setup for STP tests - -# Set up project-specific paths -# Use absolute paths to ensure tests work from any directory -STP_PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." 
&& pwd)" -STP_BIN_DIR="${STP_PROJECT_ROOT}/stp/bin" -STP_TEST_FIXTURES="${STP_PROJECT_ROOT}/stp/tests/fixtures" -STP_TEMP_DIR="${STP_PROJECT_ROOT}/stp/tests/tmp" - -# Create temporary test directory -setup_file() { - mkdir -p "${STP_TEMP_DIR}" -} - -# Clean up test directory after all tests in file -teardown_file() { - if [ -d "${STP_TEMP_DIR}" ]; then - rm -rf "${STP_TEMP_DIR}" - fi -} - -# Create a temporary test directory for each test -setup() { - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/bats-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 -} - -# Clean up temporary test directory after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Helper function to create a test project directory -create_test_project() { - local project_name="${1:-Test Project}" - local dir="${2:-$TEST_TEMP_DIR/test-project}" - - mkdir -p "$dir" - echo "Test project created: $dir" - echo "Project name: $project_name" - return 0 -} - -# Helper function to simulate git environment for tests -simulate_git_environment() { - local dir="${1:-$TEST_TEMP_DIR}" - - mkdir -p "$dir" - cd "$dir" || exit 1 - - git init -q - git config --local user.name "Test User" - git config --local user.email "test@example.com" - - echo "# Test Project" > README.md - git add README.md - git commit -q -m "Initial commit" - - echo "Git environment set up in $dir" - return 0 -} - -# Helper function to verify directory structure -assert_directory_exists() { - local dir="$1" - if [ ! -d "$dir" ]; then - echo "Directory does not exist: $dir" - return 1 - fi - return 0 -} - -# Alias for consistency -assert_dir_exists() { - assert_directory_exists "$@" -} - -# Helper function to verify file existence -assert_file_exists() { - local file="$1" - if [ ! 
-f "$file" ]; then - echo "File does not exist: $file" - return 1 - fi - return 0 -} - -# Helper function to verify file content -assert_file_contains() { - local file="$1" - local pattern="$2" - - assert_file_exists "$file" || return 1 - - if ! grep -q "$pattern" "$file"; then - echo "File does not contain pattern: $pattern" - echo "File content:" - cat "$file" - return 1 - fi - return 0 -} - -# Helper function to create a mock command that can be invoked in tests -create_mock_command() { - local command_name="$1" - local exit_status="${2:-0}" - local output="${3:-}" - - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/${command_name}" << EOF -#!/bin/bash -echo "${output}" -exit ${exit_status} -EOF - chmod +x "${TEST_TEMP_DIR}/bin/${command_name}" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" -} \ No newline at end of file diff --git a/stp/tests/llm/llm_test.bats b/stp/tests/llm/llm_test.bats deleted file mode 100755 index 32e6eed..0000000 --- a/stp/tests/llm/llm_test.bats +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_llm script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/llm-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the llm script to the test directory - cp "${STP_BIN_DIR}/stp_llm" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_llm" - - # Create minimal STP_HOME structure - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/eng" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/bin/.help" - - # Create a test usage-rules.md file - cat > "${TEST_TEMP_DIR}/stp_home/stp/eng/usage-rules.md" << EOF ---- -verblock: "01 Jan 2025:v0.1: Test User - Test version" -stp_version: 1.0.0 ---- -# Test Usage Rules - -This is a test usage rules document. - -## Test Section - -Test content for usage rules. 
-EOF - - # Create help file - cat > "${TEST_TEMP_DIR}/stp_home/stp/bin/.help/llm.help.md" << EOF -@short: -Test LLM commands - -@description: -Test description -EOF - - # Set STP_HOME environment variable - export STP_HOME="${TEST_TEMP_DIR}/stp_home" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - unset STP_HOME - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if llm requires STP_HOME to be set -@test "llm requires STP_HOME environment variable" { - unset STP_HOME - run ./stp_llm usage_rules - [ "$status" -ne 0 ] - [[ "$output" == *"STP_HOME environment variable is not set"* ]] -} - -# Test if llm displays usage when no subcommand is provided -@test "llm displays usage when no subcommand is provided" { - run ./stp_llm - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp llm <subcommand> [options]"* ]] - [[ "$output" == *"usage_rules"* ]] - [[ "$output" == *"--symlink"* ]] -} - -# Test if llm displays usage with help option -@test "llm displays usage with help option" { - run ./stp_llm --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp llm <subcommand> [options]"* ]] -} - -# Test if llm displays usage rules content -@test "llm displays usage rules content" { - run ./stp_llm usage_rules - [ "$status" -eq 0 ] - [[ "$output" == *"Test Usage Rules"* ]] - [[ "$output" == *"Test content for usage rules"* ]] -} - -# Test if llm handles missing usage-rules.md file -@test "llm handles missing usage-rules.md file" { - rm -f "${STP_HOME}/stp/eng/usage-rules.md" - run ./stp_llm usage_rules - [ "$status" -ne 0 ] - [[ "$output" == *"Usage rules file not found"* ]] -} - -# Test if llm creates symlink in current directory -@test "llm creates symlink in current directory" { - run ./stp_llm usage_rules --symlink - [ "$status" -eq 0 ] - [[ "$output" == *"Created symlink: ./usage-rules.md"* ]] - - # Verify symlink exists - [ -L "./usage-rules.md" ] - - # Verify symlink points to correct location - local 
target="$(readlink ./usage-rules.md)" - [ "$target" == "${STP_HOME}/stp/eng/usage-rules.md" ] -} - -# Test if llm creates symlink in specified directory -@test "llm creates symlink in specified directory" { - mkdir -p "${TEST_TEMP_DIR}/target" - run ./stp_llm usage_rules --symlink "${TEST_TEMP_DIR}/target" - [ "$status" -eq 0 ] - [[ "$output" == *"Created symlink: ${TEST_TEMP_DIR}/target/usage-rules.md"* ]] - - # Verify symlink exists - [ -L "${TEST_TEMP_DIR}/target/usage-rules.md" ] -} - -# Test if llm handles non-existent target directory -@test "llm handles non-existent target directory" { - run ./stp_llm usage_rules --symlink "/non/existent/directory" - [ "$status" -ne 0 ] - [[ "$output" == *"Target directory does not exist"* ]] -} - -# Test if llm handles existing symlink with 'n' response -@test "llm handles existing symlink with cancel response" { - # Create initial symlink - ln -s "${STP_HOME}/stp/eng/usage-rules.md" "./usage-rules.md" - - # Try to create again, responding 'n' to overwrite prompt - # Use echo to provide input directly to the script - output=$(echo "n" | ./stp_llm usage_rules --symlink 2>&1) - status=$? - [ "$status" -eq 0 ] - [[ "$output" == *"already exists"* ]] - [[ "$output" == *"Cancelled"* ]] - - # Verify original symlink still exists - [ -L "./usage-rules.md" ] -} - -# Test if llm handles existing symlink with 'y' response -@test "llm handles existing symlink with overwrite response" { - # Create a dummy file first - touch "./usage-rules.md" - - # Try to create symlink, responding 'y' to overwrite prompt - # Use echo to provide input directly to the script - output=$(echo "y" | ./stp_llm usage_rules --symlink 2>&1) - status=$? 
- [ "$status" -eq 0 ] - [[ "$output" == *"Created symlink"* ]] - - # Verify it's now a symlink - [ -L "./usage-rules.md" ] -} - -# Test if llm handles unknown subcommand -@test "llm handles unknown subcommand" { - run ./stp_llm unknown_subcommand - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown subcommand: unknown_subcommand"* ]] -} - -# Test if llm handles unknown option for usage_rules -@test "llm handles unknown option for usage_rules" { - run ./stp_llm usage_rules --unknown-option - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown option: --unknown-option"* ]] -} - -# Test symlink creation preserves correct permissions -@test "llm symlink preserves correct permissions" { - run ./stp_llm usage_rules --symlink - [ "$status" -eq 0 ] - - # The symlink itself should exist - [ -L "./usage-rules.md" ] - - # The target file should be readable - [ -r "${STP_HOME}/stp/eng/usage-rules.md" ] -} \ No newline at end of file diff --git a/stp/tests/main/main_test.bats b/stp/tests/main/main_test.bats deleted file mode 100644 index c8a6cfe..0000000 --- a/stp/tests/main/main_test.bats +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env bats -# Tests for the main stp script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/main-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the main script to the test directory - cp "${STP_BIN_DIR}/stp" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp" - - # Create minimal STP structure with required directories and scripts - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/bin" - mkdir -p "${TEST_TEMP_DIR}/stp_home/stp/_templ" - - # Create mock command scripts - echo '#!/bin/bash' > "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_help" - echo 'echo "Help command executed with args: $@"' >> "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_help" - - echo '#!/bin/bash' > "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_test" - echo 'echo "Test command 
executed with args: $@"' >> "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_test" - - chmod +x "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_help" - chmod +x "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_test" - - # Set STP_HOME environment variable - export STP_HOME="${TEST_TEMP_DIR}/stp_home" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - unset STP_HOME - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if stp without arguments shows help -@test "stp without arguments shows help" { - run ./stp - [ "$status" -eq 0 ] - [[ "$output" == *"Help command executed with args:"* ]] -} - -# Test if stp executes commands correctly -@test "stp executes commands correctly" { - run ./stp test arg1 arg2 - [ "$status" -eq 0 ] - [[ "$output" == *"Test command executed with args: arg1 arg2"* ]] -} - -# Test if stp help works correctly -@test "stp help works correctly" { - run ./stp help test - [ "$status" -eq 0 ] - [[ "$output" == *"Help command executed with args: test"* ]] -} - -# Test if stp handles unknown commands correctly -@test "stp handles unknown commands correctly" { - run ./stp unknown_command - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown command 'unknown_command'"* ]] -} - -# Test if stp can determine STP_HOME from script location -@test "stp can determine STP_HOME from script location" { - # Unset STP_HOME to test auto-detection - unset STP_HOME - - # Create structure that simulates script in bin directory with parent containing _templ - mkdir -p "${TEST_TEMP_DIR}/auto_detect/stp/_templ" - mkdir -p "${TEST_TEMP_DIR}/auto_detect/stp/bin" - cp "${STP_BIN_DIR}/stp" "${TEST_TEMP_DIR}/auto_detect/stp/bin/" - chmod +x "${TEST_TEMP_DIR}/auto_detect/stp/bin/stp" - - # Create mock command scripts - echo '#!/bin/bash' > "${TEST_TEMP_DIR}/auto_detect/stp/bin/stp_help" - echo 'echo "Help command executed. 
STP_HOME=$STP_HOME"' >> "${TEST_TEMP_DIR}/auto_detect/stp/bin/stp_help" - chmod +x "${TEST_TEMP_DIR}/auto_detect/stp/bin/stp_help" - - # Run from the bin directory - cd "${TEST_TEMP_DIR}/auto_detect/stp/bin" - run ./stp - - [ "$status" -eq 0 ] - [[ "$output" == *"Help command executed. STP_HOME="*"${TEST_TEMP_DIR}/auto_detect"* ]] -} - -# Test if stp makes command scripts executable if needed -@test "stp makes command scripts executable if needed" { - # Create a non-executable script - echo '#!/bin/bash' > "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_nonexec" - echo 'echo "Non-executable command ran"' >> "${TEST_TEMP_DIR}/stp_home/stp/bin/stp_nonexec" - # Do not make it executable - - # First run should make it executable and run it - run ./stp nonexec - [ "$status" -eq 0 ] - [[ "$output" == *"Warning: Making script executable"* ]] - [[ "$output" == *"Non-executable command ran"* ]] -} \ No newline at end of file diff --git a/stp/tests/migrate/migrate_test.bats b/stp/tests/migrate/migrate_test.bats deleted file mode 100644 index 7f36fca..0000000 --- a/stp/tests/migrate/migrate_test.bats +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_migrate script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/migrate-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the migrate script to the test directory - cp "${STP_BIN_DIR}/stp_migrate" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_migrate" - - # Create minimal STP directory structure - mkdir -p "stp/prj/st" - mkdir -p "stp/bin" - mkdir -p "backlog/tasks" - mkdir -p "backlog/drafts" - - # Copy stp and stp_st scripts for validation - cp "${STP_BIN_DIR}/stp" "stp/bin/" - cp "${STP_BIN_DIR}/stp_st" "stp/bin/" - chmod +x "stp/bin/stp" - chmod +x "stp/bin/stp_st" - - # Set STP_HOME for the test - export STP_HOME="${TEST_TEMP_DIR}" - - # Create a test steel thread with 
embedded tasks - cat > "stp/prj/st/ST0014.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: In Progress -created: 20250320 -completed: ---- -# ST0014: Test Steel Thread - -## Objective -Test objective - -## Tasks -- [x] Completed task one -- [x] Completed task two -- [ ] Pending task three -- [ ] Pending task four - -## Implementation Notes -Test notes -EOF - - # Create another steel thread - cat > "stp/prj/st/ST0015.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: Not Started -created: 20250320 -completed: ---- -# ST0015: Another Thread - -## Objective -Another objective - -## Tasks -- [ ] Task A -- [ ] Task B - -## Notes -More notes -EOF -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if migrate requires arguments -@test "migrate requires arguments when no options" { - run ./stp_migrate - [ "$status" -eq 1 ] - [[ "$output" == *"Usage: stp migrate"* ]] -} - -# Test help command -@test "migrate shows help with --help" { - run ./stp_migrate --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp migrate"* ]] - [[ "$output" == *"--all-active"* ]] - [[ "$output" == *"--dry-run"* ]] -} - -# Test dry run migration -@test "migrate --dry-run shows what would be migrated" { - run ./stp_migrate --dry-run ST0014 - [ "$status" -eq 0 ] - [[ "$output" == *"DRY RUN MODE"* ]] - [[ "$output" == *"Migrating ST0014"* ]] - [[ "$output" == *"Found"*"4 tasks to migrate"* ]] - [[ "$output" == *"[DRY RUN] Would create task: ST0014 - Completed task one (status: done)"* ]] - [[ "$output" == *"[DRY RUN] Would create task: ST0014 - Completed task two (status: done)"* ]] - [[ "$output" == *"[DRY RUN] Would create task: ST0014 - Pending task three (status: todo)"* ]] - [[ "$output" == *"[DRY RUN] Would create task: ST0014 - Pending task four (status: todo)"* ]] - [[ "$output" 
== *"[DRY RUN] Would update ST0014 to reference Backlog tasks"* ]] -} - -# Test actual migration -@test "migrate creates backlog tasks from embedded tasks" { - # Mock the backlog command to simulate task creation - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -if [[ "$1" == "task" && "$2" == "create" ]]; then - # Extract task number from title - if [[ "$3" =~ "task one" ]]; then - echo "Created task task-1" - elif [[ "$3" =~ "task two" ]]; then - echo "Created task task-2" - elif [[ "$3" =~ "task three" ]]; then - echo "Created task task-3" - elif [[ "$3" =~ "task four" ]]; then - echo "Created task task-4" - fi - exit 0 -elif [[ "$1" == "task" && "$2" == "edit" ]]; then - # Simulate marking task as done - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_migrate ST0014 - [ "$status" -eq 0 ] - [[ "$output" == *"Migrating ST0014"* ]] - [[ "$output" == *"Creating task: ST0014 - Completed task one"* ]] - [[ "$output" == *"Creating task: ST0014 - Completed task two"* ]] - [[ "$output" == *"Creating task: ST0014 - Pending task three"* ]] - [[ "$output" == *"Creating task: ST0014 - Pending task four"* ]] - [[ "$output" == *"Updating steel thread to reference Backlog"* ]] - - # Check that the steel thread was updated - assert_file_contains "stp/prj/st/ST0014.md" "Tasks are tracked in Backlog" - assert_file_contains "stp/prj/st/ST0014.md" "stp task list ST0014" - - # Ensure old tasks were removed - run grep -E "^- \[.\]" "stp/prj/st/ST0014.md" - [ "$status" -ne 0 ] -} - -# Test migrating non-existent steel thread -@test "migrate errors on non-existent steel thread" { - run ./stp_migrate ST9999 - [ "$status" -ne 0 ] - [[ "$output" == *"Steel thread ST9999 not found"* ]] -} - -# Test invalid steel thread ID format -@test "migrate validates steel thread ID format" { - run ./stp_migrate STXXX - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID 
format"* ]] -} - -# Test migrate with no tasks -@test "migrate handles steel thread with no tasks" { - # Create a steel thread without tasks - cat > "stp/prj/st/ST0016.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: In Progress -created: 20250320 -completed: ---- -# ST0016: No Tasks Thread - -## Objective -Test objective - -## Tasks -No tasks defined yet. - -## Notes -Test notes -EOF - - run ./stp_migrate ST0016 - [ "$status" -eq 0 ] - [[ "$output" == *"No tasks found to migrate"* ]] -} - -# Test --all-active flag -@test "migrate --all-active migrates all active threads" { - # Mock stp st list output - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/stp" << 'EOF' -#!/bin/bash -if [[ "$1" == "st" && "$2" == "list" ]]; then - cat << 'LIST' -ID | Title | Status | Created | Completed ------------|---------------------------|--------------|------------|----------- -ST0014 | Test Steel Thread | In Progress | 2025-03-20 | -ST0015 | Another Thread | Not Started | 2025-03-20 | -LIST - exit 0 -fi -# Pass through to real stp for other commands -"${STP_HOME}/stp/bin/stp" "$@" -EOF - chmod +x "${TEST_TEMP_DIR}/bin/stp" - - # Mock backlog command - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -if [[ "$1" == "task" && "$2" == "create" ]]; then - echo "Created task task-X" - exit 0 -elif [[ "$1" == "task" && "$2" == "edit" ]]; then - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - run ./stp_migrate --all-active --dry-run - [ "$status" -eq 0 ] - [[ "$output" == *"Migrating all active steel threads"* ]] - [[ "$output" == *"Migrating ST0014"* ]] - [[ "$output" == *"Migrating ST0015"* ]] -} - -# Test conflicting options -@test "migrate errors when both --all-active and steel thread ID specified" { - run ./stp_migrate --all-active ST0014 - [ "$status" -ne 0 ] - [[ "$output" == *"Cannot specify both --all-active and a specific steel thread"* ]] 
-} \ No newline at end of file diff --git a/stp/tests/run_tests.sh b/stp/tests/run_tests.sh deleted file mode 100755 index 2f52e46..0000000 --- a/stp/tests/run_tests.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# run_tests.sh - Run the STP test suite -# Usage: ./run_tests.sh [test_path] - -# Set up colors for output -GREEN='\033[0;32m' -RED='\033[0;31m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Function to display error messages -error() { - echo -e "${RED}Error: $1${NC}" >&2 - exit 1 -} - -# Function to display success messages -success() { - echo -e "${GREEN}$1${NC}" -} - -# Function to display warning messages -warning() { - echo -e "${YELLOW}Warning: $1${NC}" -} - -# Function to display information messages -info() { - echo -e "$1" -} - -# Check if bats is installed -if ! command -v bats &> /dev/null; then - # Check if we're on macOS with Homebrew - if [[ "$OSTYPE" == "darwin"* ]] && command -v brew &> /dev/null; then - error "Bats is not installed. Please install it first: - -On macOS with Homebrew: - brew install bats-core - -Or run the setup script: - ./setup_test_env.sh" - else - error "Bats is not installed. Please install it first: - -Install from source: - git clone https://github.com/bats-core/bats-core.git - cd bats-core - ./install.sh /usr/local - -Or run the setup script: - ./setup_test_env.sh" - fi -fi - -# Get the directory of this script -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -STP_ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)" - -# Check if libraries are installed -BATS_SUPPORT="$SCRIPT_DIR/lib/bats-support" -BATS_ASSERT="$SCRIPT_DIR/lib/bats-assert" -BATS_FILE="$SCRIPT_DIR/lib/bats-file" - -if [ ! -d "$BATS_SUPPORT" ] || [ ! -d "$BATS_ASSERT" ] || [ ! -d "$BATS_FILE" ]; then - warning "Bats libraries are not installed in the test directory. Some tests may fail." 
- warning "To install the libraries:" - warning " mkdir -p \"$SCRIPT_DIR/lib\"" - warning " git clone https://github.com/bats-core/bats-support.git \"$BATS_SUPPORT\"" - warning " git clone https://github.com/bats-core/bats-assert.git \"$BATS_ASSERT\"" - warning " git clone https://github.com/bats-core/bats-file.git \"$BATS_FILE\"" - echo "" - - # Update test_helper.bash to use local libraries if they exist - if [ -f "$SCRIPT_DIR/lib/test_helper.bash" ]; then - sed -i.bak "s|# load '/usr/local/lib/bats-support/load.bash'|# Check if libraries exist locally\nif [ -d \"$SCRIPT_DIR/lib/bats-support\" ]; then\n load \"$SCRIPT_DIR/lib/bats-support/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - sed -i.bak "s|# load '/usr/local/lib/bats-assert/load.bash'|if [ -d \"$SCRIPT_DIR/lib/bats-assert\" ]; then\n load \"$SCRIPT_DIR/lib/bats-assert/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - sed -i.bak "s|# load '/usr/local/lib/bats-file/load.bash'|if [ -d \"$SCRIPT_DIR/lib/bats-file\" ]; then\n load \"$SCRIPT_DIR/lib/bats-file/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - rm -f "$SCRIPT_DIR/lib/test_helper.bash.bak" - success "Updated test_helper.bash to use local libraries if they exist" - fi -fi - -# Create temporary directory for test artifacts -mkdir -p "$SCRIPT_DIR/tmp" - -# Determine which tests to run -if [ $# -gt 0 ]; then - TEST_PATH="$1" - if [ ! -e "$TEST_PATH" ]; then - # Try to resolve relative to the script directory - if [ -e "$SCRIPT_DIR/$TEST_PATH" ]; then - TEST_PATH="$SCRIPT_DIR/$TEST_PATH" - else - error "Test path not found: $TEST_PATH" - fi - fi -else - # Run all tests by default - TEST_PATH="$SCRIPT_DIR" -fi - -# Display information about the test run -info "Running STP Tests" -info "=================" -info "STP Root: $STP_ROOT_DIR" -info "Test Path: $TEST_PATH" -info "Bats Path: $(which bats 2>/dev/null || echo 'Not found')" - -# Check for required libraries -if [ ! -d "$SCRIPT_DIR/lib/bats-support" ] || [ ! 
-d "$SCRIPT_DIR/lib/bats-assert" ] || [ ! -d "$SCRIPT_DIR/lib/bats-file" ]; then - warning "Some Bats libraries are missing. Running setup_test_env.sh to install them..." - - # Run setup_test_env if it exists and is executable - if [ -x "$SCRIPT_DIR/setup_test_env.sh" ]; then - "$SCRIPT_DIR/setup_test_env.sh" - else - warning "setup_test_env.sh not found or not executable. Please run it manually to set up dependencies." - fi -fi - -echo "" - -# Run the tests -if [[ -d "$TEST_PATH" ]]; then - # If directory, run all .bats files in it, excluding the lib directory - find "$TEST_PATH" -name "*.bats" | grep -v "/lib/" | sort | while read -r test_file; do - info "Running test file: $(basename "$test_file")" - if bats "$test_file"; then - success "✓ $(basename "$test_file") passed" - else - error "✗ $(basename "$test_file") failed" - fi - echo "" - done -else - # Run a specific test file - info "Running test file: $(basename "$TEST_PATH")" - if bats "$TEST_PATH"; then - success "✓ $(basename "$TEST_PATH") passed" - else - error "✗ $(basename "$TEST_PATH") failed" - fi -fi - -# Clean up -rm -rf "$SCRIPT_DIR/tmp" - -success "All tests completed." 
\ No newline at end of file diff --git a/stp/tests/setup_test_env.sh b/stp/tests/setup_test_env.sh deleted file mode 100755 index 15f9d8a..0000000 --- a/stp/tests/setup_test_env.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -# setup_test_env.sh - Set up the test environment for STP -# Usage: ./setup_test_env.sh [install_dir] - -# Set up colors for output -GREEN='\033[0;32m' -RED='\033[0;31m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Function to display error messages -error() { - echo -e "${RED}Error: $1${NC}" >&2 - exit 1 -} - -# Function to display success messages -success() { - echo -e "${GREEN}$1${NC}" -} - -# Function to display warning messages -warning() { - echo -e "${YELLOW}Warning: $1${NC}" -} - -# Function to display information messages -info() { - echo -e "$1" -} - -# Get the directory of this script -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Create lib directory if it doesn't exist -mkdir -p "$SCRIPT_DIR/lib" - -# Check if Bats is already installed -if command -v bats &> /dev/null; then - success "Bats is already installed" -else - info "Installing Bats..." - - # Check if running on macOS with Homebrew available - if [[ "$OSTYPE" == "darwin"* ]] && command -v brew &> /dev/null; then - info "Detected macOS with Homebrew. Installing Bats using brew..." - brew install bats-core || error "Failed to install Bats with Homebrew" - success "Bats installed successfully using Homebrew" - else - # Manual installation from git - # Default install directory - INSTALL_DIR="/usr/local" - - # Use provided install directory if specified - if [ $# -gt 0 ]; then - INSTALL_DIR="$1" - fi - - info "Installing Bats from source to $INSTALL_DIR..." 
- - # Create a temporary directory for Bats installation - TEMP_DIR=$(mktemp -d) - - # Clone Bats repo - git clone https://github.com/bats-core/bats-core.git "$TEMP_DIR/bats-core" || error "Failed to clone Bats repository" - - # Install Bats - cd "$TEMP_DIR/bats-core" || error "Failed to change to Bats directory" - - if [ "$INSTALL_DIR" = "/usr/local" ]; then - # Need sudo for system directories - sudo ./install.sh "$INSTALL_DIR" || error "Failed to install Bats" - else - # No sudo needed for user-owned directories - ./install.sh "$INSTALL_DIR" || error "Failed to install Bats" - fi - - # Clean up - rm -rf "$TEMP_DIR" - - success "Bats installed successfully to $INSTALL_DIR" - fi -fi - -# Install Bats libraries -info "Installing Bats libraries..." - -# Create lib directory if it doesn't exist -mkdir -p "$SCRIPT_DIR/lib" - -# Bats Support -if [ -d "$SCRIPT_DIR/lib/bats-support" ]; then - info "Bats Support is already installed" -else - info "Installing bats-support..." - git clone https://github.com/bats-core/bats-support.git "$SCRIPT_DIR/lib/bats-support" || error "Failed to clone bats-support" - success "Bats Support installed successfully" -fi - -# Bats Assert -if [ -d "$SCRIPT_DIR/lib/bats-assert" ]; then - info "Bats Assert is already installed" -else - info "Installing bats-assert..." - git clone https://github.com/bats-core/bats-assert.git "$SCRIPT_DIR/lib/bats-assert" || error "Failed to clone bats-assert" - success "Bats Assert installed successfully" -fi - -# Bats File -if [ -d "$SCRIPT_DIR/lib/bats-file" ]; then - info "Bats File is already installed" -else - info "Installing bats-file..." 
- git clone https://github.com/bats-core/bats-file.git "$SCRIPT_DIR/lib/bats-file" || error "Failed to clone bats-file" - success "Bats File installed successfully" -fi - -# Print information about the bats installation -info "" -info "Bats installation details:" -if command -v bats &> /dev/null; then - BATS_PATH=$(command -v bats) - info " Bats executable: $BATS_PATH" - - # Try to find the load.bash file - if command -v brew &> /dev/null && brew --prefix bats-core &> /dev/null; then - BREW_PREFIX=$(brew --prefix bats-core) - if [ -f "$BREW_PREFIX/lib/bats-core/load.bash" ]; then - info " Bats load.bash: $BREW_PREFIX/lib/bats-core/load.bash" - fi - fi - - # Check for other common locations - for path in \ - "/usr/local/lib/bats/load.bash" \ - "/usr/lib/bats/load.bash" \ - "/opt/homebrew/lib/bats-core/load.bash" \ - "/usr/local/lib/bats-core/load.bash" - do - if [ -f "$path" ]; then - info " Bats load.bash: $path" - break - fi - done -else - warning " Bats executable not found in PATH" -fi - -# Update test_helper.bash to use local libraries -if [ -f "$SCRIPT_DIR/lib/test_helper.bash" ]; then - info "Updating test_helper.bash to use local libraries..." 
- - # Create a backup - cp "$SCRIPT_DIR/lib/test_helper.bash" "$SCRIPT_DIR/lib/test_helper.bash.bak" - - # Update to use local libraries - sed -i.tmp "s|# load '/usr/local/lib/bats-support/load.bash'|# Check if libraries exist locally\nif [ -d \"$SCRIPT_DIR/lib/bats-support\" ]; then\n load \"$SCRIPT_DIR/lib/bats-support/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - sed -i.tmp "s|# load '/usr/local/lib/bats-assert/load.bash'|if [ -d \"$SCRIPT_DIR/lib/bats-assert\" ]; then\n load \"$SCRIPT_DIR/lib/bats-assert/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - sed -i.tmp "s|# load '/usr/local/lib/bats-file/load.bash'|if [ -d \"$SCRIPT_DIR/lib/bats-file\" ]; then\n load \"$SCRIPT_DIR/lib/bats-file/load.bash\"\nfi|" "$SCRIPT_DIR/lib/test_helper.bash" - - # Remove temporary files - rm -f "$SCRIPT_DIR/lib/test_helper.bash.tmp" - - success "test_helper.bash updated successfully" -fi - -# Create the tmp directory for test runs -mkdir -p "$SCRIPT_DIR/tmp" - -success "Test environment setup complete!" -info "" -info "To run tests, use: ./run_tests.sh" -info "To run a specific test suite, use: ./run_tests.sh bootstrap/bootstrap_test.bats" \ No newline at end of file diff --git a/stp/tests/st/st_directory_test.bats b/stp/tests/st/st_directory_test.bats deleted file mode 100755 index 354ef9c..0000000 --- a/stp/tests/st/st_directory_test.bats +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bats -# Test steel thread directory structure (v1.2.1+) - -setup() { - # Create a temporary test directory - TEST_TEMP_DIR=$(mktemp -d) - export TEST_TEMP_DIR - cd "$TEST_TEMP_DIR" - - # Initialize STP project - "$BATS_TEST_DIRNAME/../../bin/stp_init" "Test Project" . 
- - # Create version file to indicate v1.2.1 - mkdir -p stp/.config - echo "stp_version: 1.2.1" > stp/.config/version -} - -teardown() { - # Clean up - cd / - rm -rf "$TEST_TEMP_DIR" -} - -@test "stp st new creates directory structure" { - run "$BATS_TEST_DIRNAME/../../bin/stp_st" new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Check directory was created - [ -d "stp/prj/st/ST0001" ] - - # Check files were created - [ -f "stp/prj/st/ST0001/info.md" ] - [ -f "stp/prj/st/ST0001/design.md" ] - [ -f "stp/prj/st/ST0001/impl.md" ] - [ -f "stp/prj/st/ST0001/tasks.md" ] - [ -f "stp/prj/st/ST0001/results.md" ] - - # Check info.md contains correct title - grep -q "Test Steel Thread" "stp/prj/st/ST0001/info.md" -} - -@test "stp st show displays info.md by default" { - # Create a steel thread first - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "Show Test" - - run "$BATS_TEST_DIRNAME/../../bin/stp_st" show ST0001 - [ "$status" -eq 0 ] - [[ "$output" =~ "ST0001: Show Test" ]] -} - -@test "stp st show can display specific files" { - # Create a steel thread first - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "File Test" - - # Add content to design.md - echo "# Design Content" > "stp/prj/st/ST0001/design.md" - - run "$BATS_TEST_DIRNAME/../../bin/stp_st" show ST0001 design - [ "$status" -eq 0 ] - [[ "$output" =~ "Design Content" ]] -} - -@test "stp st show all displays all files" { - # Create a steel thread first - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "All Files Test" - - run "$BATS_TEST_DIRNAME/../../bin/stp_st" show ST0001 all - [ "$status" -eq 0 ] - [[ "$output" =~ "=== info.md ===" ]] - [[ "$output" =~ "=== design.md ===" ]] -} - -@test "stp st list works with directory structure" { - # Create multiple steel threads - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "First Thread" - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "Second Thread" - - run "$BATS_TEST_DIRNAME/../../bin/stp_st" list - [ "$status" -eq 0 ] - [[ "$output" =~ "ST0001" ]] - [[ "$output" =~ "First Thread" ]] - [[ 
"$output" =~ "ST0002" ]] - [[ "$output" =~ "Second Thread" ]] -} - -@test "stp st done moves entire directory" { - # Create a steel thread - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "Complete Me" - - # Since we're in test environment, directories aren't moved by status - # So we'll just check that the status is updated - run "$BATS_TEST_DIRNAME/../../bin/stp_st" done ST0001 - [ "$status" -eq 0 ] - - # In test environment, directory stays in place but status is updated - [ -d "stp/prj/st/ST0001" ] - [ -f "stp/prj/st/ST0001/info.md" ] - - # Check status was updated - grep -q "status: Completed" "stp/prj/st/ST0001/info.md" -} - -@test "stp st edit creates file if it doesn't exist" { - # Create a steel thread - "$BATS_TEST_DIRNAME/../../bin/stp_st" new "Edit Test" - - # Remove a file - rm -f "stp/prj/st/ST0001/impl.md" - - # Try to edit it (we can't test the actual editing, but we can check file creation) - # For testing, we'll just touch the file as if it was edited - touch "stp/prj/st/ST0001/impl.md" - - [ -f "stp/prj/st/ST0001/impl.md" ] -} \ No newline at end of file diff --git a/stp/tests/st/st_test.bats b/stp/tests/st/st_test.bats deleted file mode 100644 index e03f43c..0000000 --- a/stp/tests/st/st_test.bats +++ /dev/null @@ -1,364 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_st script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/st-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the steel thread script to the test directory - cp "${STP_BIN_DIR}/stp_st" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_st" - - # Create minimal STP directory structure - mkdir -p "stp/prj/st" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if st requires a command -@test "st requires a command" { - run ./stp_st - [ 
"$status" -ne 0 ] - [[ "$output" == *"Steel thread command is required"* ]] -} - -# Test creating a new steel thread -@test "st new creates a new steel thread" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - run ./stp_st new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Check if steel thread directory was created - assert_dir_exists "stp/prj/st/ST0001" - assert_file_exists "stp/prj/st/ST0001/info.md" - assert_file_contains "stp/prj/st/ST0001/info.md" "ST0001: Test Steel Thread" - run grep -F "status: Not Started" "stp/prj/st/ST0001/info.md" - [ "$status" -eq 0 ] - - # Check if index was updated - assert_file_exists "stp/prj/st/steel_threads.md" - assert_file_contains "stp/prj/st/steel_threads.md" "ST0001" - assert_file_contains "stp/prj/st/steel_threads.md" "Test Steel Thread" - assert_file_contains "stp/prj/st/steel_threads.md" "Not Started" -} - -# Test creating multiple steel threads and check IDs -@test "st new creates sequential steel thread IDs" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create first steel thread - run ./stp_st new "First Steel Thread" - [ "$status" -eq 0 ] - assert_dir_exists "stp/prj/st/ST0001" - - # Create second steel thread - run ./stp_st new "Second Steel Thread" - [ "$status" -eq 0 ] - assert_dir_exists "stp/prj/st/ST0002" - - # Create third steel thread - run ./stp_st new "Third Steel Thread" - [ "$status" -eq 0 ] - assert_dir_exists "stp/prj/st/ST0003" - - # Check if index contains all three steel threads - assert_file_contains "stp/prj/st/steel_threads.md" "ST0001" - assert_file_contains "stp/prj/st/steel_threads.md" "ST0002" - assert_file_contains "stp/prj/st/steel_threads.md" "ST0003" -} - -# Test marking a steel thread as done -@test "st done marks a steel thread as complete" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > 
"stp/.config/version" - - # Create a steel thread - run ./stp_st new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Mark it as done - run ./stp_st done "ST0001" - [ "$status" -eq 0 ] - - # Check if status and completion date were updated in info.md - run grep -F "status: Completed" "stp/prj/st/ST0001/info.md" - [ "$status" -eq 0 ] - - # Check completion date in YAML frontmatter - using today's date - run grep -F "completed: $(date '+%Y%m%d')" "stp/prj/st/ST0001/info.md" - [ "$status" -eq 0 ] - - # Check if index was updated - assert_file_contains "stp/prj/st/steel_threads.md" "Completed" -} - -# Test marking a steel thread as done using just the number -@test "st done works with just the number" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create a steel thread - run ./stp_st new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Mark it as done using just the number - run ./stp_st done "1" - [ "$status" -eq 0 ] - - # Check if status was updated - run grep -F "status: Completed" "stp/prj/st/ST0001/info.md" - [ "$status" -eq 0 ] -} - -# Test listing steel threads -@test "st list shows all steel threads" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create three steel threads with different statuses - run ./stp_st new "First Steel Thread" - [ "$status" -eq 0 ] - run ./stp_st new "Second Steel Thread" - [ "$status" -eq 0 ] - run ./stp_st new "Third Steel Thread" - [ "$status" -eq 0 ] - - # Mark second as done - run ./stp_st done "2" - [ "$status" -eq 0 ] - - # List all steel threads - run ./stp_st list - echo "Output of st list: $output" - echo "Exit status: $status" - [ "$status" -eq 0 ] - - # Check if all three steel thread directories were created properly - assert_dir_exists "stp/prj/st/ST0001" - assert_dir_exists "stp/prj/st/ST0002" - assert_dir_exists "stp/prj/st/ST0003" - - # Check that the index file has expected 
entries - assert_file_exists "stp/prj/st/steel_threads.md" - run grep "First Steel Thread" "stp/prj/st/steel_threads.md" - [ "$status" -eq 0 ] - run grep "Second Steel Thread" "stp/prj/st/steel_threads.md" - [ "$status" -eq 0 ] - run grep "Third Steel Thread" "stp/prj/st/steel_threads.md" - [ "$status" -eq 0 ] -} - -# Test listing steel threads with status filter -@test "st list --status filters by status" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create three steel threads - run ./stp_st new "First Steel Thread" - [ "$status" -eq 0 ] - run ./stp_st new "Second Steel Thread" - [ "$status" -eq 0 ] - run ./stp_st new "Third Steel Thread" - [ "$status" -eq 0 ] - - # Mark second as done - run ./stp_st done "2" - [ "$status" -eq 0 ] - - # List only completed steel threads - run ./stp_st list --status "Completed" - echo "Output of st list --status: $output" - echo "Exit status: $status" - [ "$status" -eq 0 ] - - # We won't test the command output directly as it's being tricky - # Instead, verify that the directories were created with the correct content - assert_dir_exists "stp/prj/st/ST0001" - assert_dir_exists "stp/prj/st/ST0002" - assert_dir_exists "stp/prj/st/ST0003" - - # Check that ST0002 is marked as completed - run grep -F "status: Completed" "stp/prj/st/ST0002/info.md" - [ "$status" -eq 0 ] - - # Check that the other threads are not completed - run grep -F "status: Not Started" "stp/prj/st/ST0001/info.md" - [ "$status" -eq 0 ] - run grep -F "status: Not Started" "stp/prj/st/ST0003/info.md" - [ "$status" -eq 0 ] -} - -# Test showing a steel thread -@test "st show displays the content of a steel thread" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create a steel thread - run ./stp_st new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Show the steel thread (defaults to info.md) - run ./stp_st show "ST0001" - [ 
"$status" -eq 0 ] - - # Check if content is displayed - [[ "$output" == *"ST0001: Test Steel Thread"* ]] - [[ "$output" == *"status: Not Started"* ]] -} - -# Test showing a steel thread with just the number -@test "st show works with just the number" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create a steel thread - run ./stp_st new "Test Steel Thread" - [ "$status" -eq 0 ] - - # Show the steel thread using just the number - run ./stp_st show "1" - [ "$status" -eq 0 ] - - # Check if content is displayed - [[ "$output" == *"ST0001: Test Steel Thread"* ]] -} - -# Test error when showing a non-existent steel thread -@test "st show errors on non-existent steel thread" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - run ./stp_st show "ST9999" - [ "$status" -ne 0 ] - [[ "$output" == *"File not found"* ]] -} - -# Test creating a steel thread with a template if available -@test "st new uses template if available" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create template directory structure - mkdir -p "stp/_templ/prj/st/ST####" - cat > "stp/_templ/prj/st/ST####/info.md" << EOF ---- -verblock: "DD MMM YYYY:v0.1: Author Name - Initial version" -stp_version: 1.2.1 -status: Not Started -created: YYYYMMDD -completed: ---- -# ST####: [Title] - -## Custom Section -This is a custom template -EOF - - # Create a steel thread using the template - run ./stp_st new "Template Test" - [ "$status" -eq 0 ] - - # Check if template was used - assert_dir_exists "stp/prj/st/ST0001" - assert_file_contains "stp/prj/st/ST0001/info.md" "ST0001: Template Test" - assert_file_contains "stp/prj/st/ST0001/info.md" "## Custom Section" - assert_file_contains "stp/prj/st/ST0001/info.md" "This is a custom template" -} - -# Test synchronizing steel threads index -@test "st sync updates 
the steel_threads.md file" { - # Create version file for v1.2.1 - mkdir -p "stp/.config" - echo "stp_version: 1.2.1" > "stp/.config/version" - - # Create section markers in steel_threads.md - mkdir -p "stp/prj/st" - cat > "stp/prj/st/steel_threads.md" << EOF -# Steel Threads - -This document serves as an index of all steel threads in the project. - -## Index - -<!-- BEGIN: STEEL_THREAD_INDEX --> -Old content that should be replaced -<!-- END: STEEL_THREAD_INDEX --> - -## Status Definitions - -<!-- BEGIN: STATUS_DEFINITIONS --> -Old status definitions -<!-- END: STATUS_DEFINITIONS --> -EOF - - # Create three steel threads with different statuses - run ./stp_st new "First Steel Thread" - [ "$status" -eq 0 ] - run ./stp_st new "Second Steel Thread" - [ "$status" -eq 0 ] - # Mark second as completed - run ./stp_st done "2" - [ "$status" -eq 0 ] - - # Run the sync command with --write - run ./stp_st sync --write --width 80 - [ "$status" -eq 0 ] - [[ "$output" == *"Updated steel threads index file"* ]] - - # Check that the old content was replaced - run grep -F "Old content that should be replaced" "stp/prj/st/steel_threads.md" - [ "$status" -ne 0 ] - - # Check that the new content contains the steel threads - assert_file_contains "stp/prj/st/steel_threads.md" "First Steel Thread" - assert_file_contains "stp/prj/st/steel_threads.md" "Second Steel Thread" - assert_file_contains "stp/prj/st/steel_threads.md" "Not Started" - assert_file_contains "stp/prj/st/steel_threads.md" "Completed" - - # Run the sync command without --write (should output to stdout) - run ./stp_st sync - [ "$status" -eq 0 ] - [[ "$output" == *"First Steel Thread"* ]] - [[ "$output" == *"Second Steel Thread"* ]] -} - -# Test the width parameter for sync -@test "st sync respects the --width parameter" { - # Create test steel thread - run ./stp_st new "Test Steel Thread With a Very Long Name That Will Be Truncated" - [ "$status" -eq 0 ] - - # Run sync with a narrow width - run ./stp_st sync --width 40 - [ 
"$status" -eq 0 ] - - # Run sync with a wide width - run ./stp_st sync --width 120 - [ "$status" -eq 0 ] - - # We don't test exact formatting here, just that the command runs successfully - # as formatting tests would be too brittle -} \ No newline at end of file diff --git a/stp/tests/status/status_test.bats b/stp/tests/status/status_test.bats deleted file mode 100644 index c542cd9..0000000 --- a/stp/tests/status/status_test.bats +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_status script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/status-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the status script to the test directory - cp "${STP_BIN_DIR}/stp_status" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_status" - - # Create minimal STP directory structure - mkdir -p "stp/prj/st" - mkdir -p "stp/bin" - mkdir -p "backlog/tasks" - mkdir -p "backlog/drafts" - - # Copy stp and stp_st scripts for validation - cp "${STP_BIN_DIR}/stp" "stp/bin/" - cp "${STP_BIN_DIR}/stp_st" "stp/bin/" - chmod +x "stp/bin/stp" - chmod +x "stp/bin/stp_st" - - # Set STP_HOME for the test - export STP_HOME="${TEST_TEMP_DIR}" - - # Create a test steel thread - cat > "stp/prj/st/ST0014.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: In Progress -created: 20250320 -completed: ---- -# ST0014: Test Steel Thread - -## Objective -Test objective - -## Tasks -Tasks are tracked in Backlog. 
View with: \`stp task list ST0014\` - -## Implementation Notes -Test notes -EOF -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if status requires a command -@test "status requires a command" { - run ./stp_status - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp status"* ]] -} - -# Test help command -@test "status shows help with --help" { - run ./stp_status --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp status"* ]] - [[ "$output" == *"show"* ]] - [[ "$output" == *"sync"* ]] - [[ "$output" == *"report"* ]] -} - -# Test showing status for a steel thread -@test "status show displays steel thread and task status" { - # Create test task files - cat > "backlog/tasks/task-1 - ST0014-Done-task.md" << EOF ---- -id: task-1 -title: ST0014 - Done task -status: Done ---- -EOF - - cat > "backlog/tasks/task-2 - ST0014-Todo-task.md" << EOF ---- -id: task-2 -title: ST0014 - Todo task -status: To Do ---- -EOF - - run ./stp_status show ST0014 - [ "$status" -eq 0 ] - [[ "$output" == *"Steel Thread: ST0014"* ]] - [[ "$output" == *"Current Status: In Progress"* ]] - [[ "$output" == *"Task Summary:"* ]] - [[ "$output" == *"Total Tasks:"* ]] -} - -# Test showing status for non-existent steel thread -@test "status show errors on non-existent steel thread" { - run ./stp_status show ST9999 - [ "$status" -ne 0 ] - [[ "$output" == *"Steel thread ST9999 not found"* ]] -} - -# Test invalid steel thread ID format -@test "status show validates steel thread ID format" { - run ./stp_status show INVALID - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID format"* ]] -} - -# Test sync with dry run -@test "status sync --dry-run shows what would change" { - # Create completed tasks - cat > "backlog/tasks/task-1 - ST0014-Done-task.md" << EOF ---- -id: task-1 -title: ST0014 - Done task -status: Done ---- -EOF - - cat > "backlog/tasks/task-2 - 
ST0014-Done-task-2.md" << EOF ---- -id: task-2 -title: ST0014 - Another done task -status: Done ---- -EOF - - run ./stp_status sync ST0014 --dry-run - [ "$status" -eq 0 ] - [[ "$output" == *"Steel Thread: ST0014"* ]] - [[ "$output" == *"Current Status: In Progress"* ]] - # When all tasks are done, it should recommend Completed - # [[ "$output" == *"New Status: Completed"* ]] - # [[ "$output" == *"DRY RUN"* ]] -} - -# Test status report -@test "status report shows all active threads" { - # Create additional steel threads - cat > "stp/prj/st/ST0015.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: Not Started -created: 20250320 -completed: ---- -# ST0015: Another Test Thread -EOF - - # Mock the stp st list command - mkdir -p stp/prj/st - cat > stp/prj/st/steel_threads.md << EOF -# Steel Threads - -| ID | Title | Status | Created | Completed | -|----|-------|--------|---------|-----------| -| ST0014 | Test Steel Thread | In Progress | 2025-03-20 | | -| ST0015 | Another Test Thread | Not Started | 2025-03-20 | | -EOF - - run ./stp_status report - [ "$status" -eq 0 ] - [[ "$output" == *"Steel Thread Status Report"* ]] - # The report implementation needs to be tested more thoroughly - # once we understand how it interacts with stp st list -} - -# Test unknown command -@test "status shows error for unknown command" { - run ./stp_status unknown - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown command: unknown"* ]] -} \ No newline at end of file diff --git a/stp/tests/task/task_test.bats b/stp/tests/task/task_test.bats deleted file mode 100644 index ad1a185..0000000 --- a/stp/tests/task/task_test.bats +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_task script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/task-test-XXXXXX")" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Copy the 
task script to the test directory - cp "${STP_BIN_DIR}/stp_task" "${TEST_TEMP_DIR}/" - chmod +x "${TEST_TEMP_DIR}/stp_task" - - # Create minimal STP directory structure - mkdir -p "stp/prj/st" - mkdir -p "stp/bin" - mkdir -p "backlog/tasks" - mkdir -p "backlog/drafts" - - # Copy stp and stp_st scripts for task validation - cp "${STP_BIN_DIR}/stp" "stp/bin/" - cp "${STP_BIN_DIR}/stp_st" "stp/bin/" - chmod +x "stp/bin/stp" - chmod +x "stp/bin/stp_st" - - # Set STP_HOME for the test - export STP_HOME="${TEST_TEMP_DIR}" - - # Create a test steel thread - cat > "stp/prj/st/ST0014.md" << EOF ---- -verblock: "20 Mar 2025:v0.1: Test - Initial version" -stp_version: 1.0.0 -status: In Progress -created: 20250320 -completed: ---- -# ST0014: Test Steel Thread - -## Objective -Test objective - -## Tasks -- [ ] First task -- [ ] Second task -EOF -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Test if task requires a command -@test "task requires a command" { - run ./stp_task - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp task"* ]] -} - -# Test help command -@test "task shows help with --help" { - run ./stp_task --help - [ "$status" -eq 0 ] - [[ "$output" == *"Usage: stp task"* ]] - [[ "$output" == *"create"* ]] - [[ "$output" == *"list"* ]] - [[ "$output" == *"sync"* ]] -} - -# Test creating a task -@test "task create creates a new backlog task" { - # The steel thread ST0014 is created in setup(), so stp st show should find it - - # Mock the stp bl command by mocking the underlying backlog command - mkdir -p "${TEST_TEMP_DIR}/bin" - cat > "${TEST_TEMP_DIR}/bin/backlog" << 'EOF' -#!/bin/bash -if [[ "$1" == "task" && "$2" == "create" ]]; then - echo "Created task task-1" - echo "File: /path/to/task-1.md" - exit 0 -fi -exit 1 -EOF - chmod +x "${TEST_TEMP_DIR}/bin/backlog" - export PATH="${TEST_TEMP_DIR}/bin:$PATH" - - # Also need to provide stp_backlog and stp_bl 
for the bl command - cp "${STP_BIN_DIR}/stp_backlog" "stp/bin/" - cp "${STP_BIN_DIR}/stp_bl" "stp/bin/" - chmod +x "stp/bin/stp_backlog" - chmod +x "stp/bin/stp_bl" - - run ./stp_task create ST0014 "Test task description" - [ "$status" -eq 0 ] - [[ "$output" == *"Creating task: ST0014 - Test task description"* ]] - [[ "$output" == *"Task created successfully"* ]] -} - -# Test creating task with invalid steel thread ID -@test "task create validates steel thread ID format" { - run ./stp_task create INVALID "Test task" - [ "$status" -ne 0 ] - [[ "$output" == *"Invalid steel thread ID format"* ]] -} - -# Test creating task without title -@test "task create requires both ID and title" { - run ./stp_task create ST0014 - [ "$status" -ne 0 ] - [[ "$output" == *"Both steel thread ID and title are required"* ]] -} - -# Test listing tasks for a steel thread -@test "task list shows tasks for a steel thread" { - # Create test task files - cat > "backlog/tasks/task-1 - ST0014-First-task.md" << EOF ---- -id: task-1 -title: ST0014 - First task -status: Done -assignee: [] -created_date: '2025-07-08' -labels: [] -dependencies: [] ---- - -## Description -First task description -EOF - - cat > "backlog/tasks/task-2 - ST0014-Second-task.md" << EOF ---- -id: task-2 -title: ST0014 - Second task -status: To Do -assignee: [] -created_date: '2025-07-08' -labels: [] -dependencies: [] ---- - -## Description -Second task description -EOF - - run ./stp_task list ST0014 - [ "$status" -eq 0 ] - [[ "$output" == *"Tasks for ST0014:"* ]] - [[ "$output" == *"task-1"* ]] - [[ "$output" == *"[done]"* ]] - [[ "$output" == *"ST0014 - First task"* ]] - [[ "$output" == *"task-2"* ]] - [[ "$output" == *"[todo]"* ]] - [[ "$output" == *"ST0014 - Second task"* ]] -} - -# Test listing tasks requires steel thread ID -@test "task list requires steel thread ID" { - run ./stp_task list - [ "$status" -ne 0 ] - [[ "$output" == *"Steel thread ID required"* ]] -} - -# Test sync status -@test "task sync shows task status 
summary" { - # Create test task files with different statuses - cat > "backlog/tasks/task-1 - ST0014-Done-task.md" << EOF ---- -id: task-1 -title: ST0014 - Done task -status: Done ---- -EOF - - cat > "backlog/tasks/task-2 - ST0014-Todo-task.md" << EOF ---- -id: task-2 -title: ST0014 - Todo task -status: To Do ---- -EOF - - cat > "backlog/tasks/task-3 - ST0014-In-progress-task.md" << EOF ---- -id: task-3 -title: ST0014 - In progress task -status: In Progress ---- -EOF - - # Mock the backlog list command output - create_mock_command "backlog" 0 "task-1 - ST0014 - Done task -task-2 - ST0014 - Todo task -task-3 - ST0014 - In progress task" - - run ./stp_task sync ST0014 - [ "$status" -eq 0 ] - [[ "$output" == *"Syncing status for ST0014"* ]] - # The sync command shows task counts, but the implementation - # might need adjustment for proper counting in tests -} - -# Test unknown command -@test "task shows error for unknown command" { - run ./stp_task unknown - [ "$status" -ne 0 ] - [[ "$output" == *"Unknown command: unknown"* ]] -} \ No newline at end of file diff --git a/stp/tests/test-st/stp/.config/version b/stp/tests/test-st/stp/.config/version deleted file mode 100644 index f7d5add..0000000 --- a/stp/tests/test-st/stp/.config/version +++ /dev/null @@ -1 +0,0 @@ -stp_version: 1.2.1 diff --git a/stp/tests/test-st/stp/_templ/prj/st/ST####/info.md b/stp/tests/test-st/stp/_templ/prj/st/ST####/info.md deleted file mode 100644 index de3f08c..0000000 --- a/stp/tests/test-st/stp/_templ/prj/st/ST####/info.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -verblock: "DD MMM YYYY:v0.1: Author Name - Initial version" -stp_version: 1.2.1 -status: Not Started -created: YYYYMMDD -completed: ---- -# ST####: [Title] - -## Custom Section -This is a custom template -EOF < /dev/null \ No newline at end of file diff --git a/stp/tests/test-st/stp_st b/stp/tests/test-st/stp_st deleted file mode 100755 index f18ac82..0000000 --- a/stp/tests/test-st/stp_st +++ /dev/null @@ -1,1074 +0,0 @@ -#!/bin/bash 
-# stp_st - Manage steel threads (v1.2.1 - Directory-based structure) -# Usage: stp_st <command> [options] [arguments] - -# Exit on error -set -e - -# Function to display error messages -error() { - echo "Error: $1" >&2 - exit 1 -} - -# Function to display usage information -usage() { - echo "Usage: stp st <command> [options] [arguments]" - echo "" - echo "Manage steel threads for the project" - echo "" - echo "Commands:" - echo " new <title> Create a new steel thread" - echo " done <id> Mark a steel thread as complete" - echo " list [--status <status>] [--width N] List all steel threads" - echo " sync [--write] [--width N] Synchronize steel_threads.md with individual ST files" - echo " organize [--write] Organize ST files in directories by status" - echo " show <id> [file] Show details of a specific steel thread" - echo " edit <id> [file] Open a steel thread in your default editor" - echo "" - echo "File options for show/edit commands:" - echo " info - Main information file (default)" - echo " design - Design decisions and approach" - echo " impl - Implementation details" - echo " tasks - Task tracking" - echo " results - Results and outcomes" - echo " all - Show all files combined (show only)" - echo "" - echo "Examples:" - echo " stp st new \"Implement Feature X\"" - echo " stp st done ST0001" - echo " stp st list --status \"In Progress\" --width 100" - echo " stp st show ST0001" - echo " stp st show ST0001 design" - echo " stp st edit ST0001 impl" - exit 1 -} - -# Check for required arguments -if [ $# -lt 1 ]; then - error "Steel thread command is required" - usage -fi - -# Load project configuration if available -if [ -f stp/.config/config ]; then - source stp/.config/config -elif [ -f .stp-config ]; then - # For backward compatibility - source .stp-config -fi - -# Get command -ST_COMMAND="$1" -shift - -# Function to check if we're using directory structure (v1.2.1+) -is_directory_structure() { - local stp_version=$(grep -m 1 "^stp_version:" 
stp/.config/version 2>/dev/null | sed "s/^stp_version: *//") - if [[ "$stp_version" > "1.2.0" ]] || [[ "$stp_version" == "1.2.1" ]]; then - return 0 # true - using directory structure - else - return 1 # false - using file structure - fi -} - -# Function to determine the appropriate path for a steel thread based on its status -get_st_path() { - local st_id="$1" - local status="$2" - local file_name="${3:-info.md}" # Default to info.md - local base_dir="stp/prj/st" - local test_env=0 - - # Check if we're in a test environment - if [[ "${TEST_TEMP_DIR:-}" != "" ]] || [[ "$(pwd)" == /tmp* ]] || [[ "$(pwd)" == */stp/tests/* ]]; then - test_env=1 - fi - - # For directory structure - if is_directory_structure; then - # If we're in a test environment, just use the main directory - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id/$file_name" - return - fi - - # If status is not provided, try to find the steel thread directory - if [ -z "$status" ]; then - # Check all possible locations - local possible_locations=( - "$base_dir/$st_id" - "$base_dir/COMPLETED/$st_id" - "$base_dir/NOT-STARTED/$st_id" - "$base_dir/CANCELLED/$st_id" - ) - - for location in "${possible_locations[@]}"; do - if [ -d "$location" ]; then - # Read status from info.md - if [ -f "$location/info.md" ]; then - yaml_status=$(grep -m 1 "^status:" "$location/info.md" | sed "s/^status: *//") - body_status=$(grep -m 1 "^\- \*\*Status\*\*:" "$location/info.md" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Prioritize YAML frontmatter status - if [ -n "$yaml_status" ]; then - status="$yaml_status" - elif [ -n "$body_status" ]; then - status="$body_status" - else - status="Not Started" - fi - fi - break - fi - done - fi - - # Return the appropriate directory based on status - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id/$file_name" - else - case "$status" in - "Completed") - echo "$base_dir/COMPLETED/$st_id/$file_name" - ;; - "Not Started") - echo 
"$base_dir/NOT-STARTED/$st_id/$file_name" - ;; - "Cancelled") - echo "$base_dir/CANCELLED/$st_id/$file_name" - ;; - *) - # In Progress or On Hold stay in the main directory - echo "$base_dir/$st_id/$file_name" - ;; - esac - fi - else - # Legacy file structure - ignore file_name parameter - if [ $test_env -eq 1 ]; then - echo "$base_dir/$st_id.md" - else - case "$status" in - "Completed") - echo "$base_dir/COMPLETED/$st_id.md" - ;; - "Not Started") - echo "$base_dir/NOT-STARTED/$st_id.md" - ;; - "Cancelled") - echo "$base_dir/CANCELLED/$st_id.md" - ;; - *) - echo "$base_dir/$st_id.md" - ;; - esac - fi - fi -} - -# Function to get the next steel thread ID -get_next_steel_thread_id() { - local st_prefix="${ST_PREFIX:-ST}" - local base_dir="stp/prj/st" - local next_id=1 - local max_id=0 - - if is_directory_structure; then - # Find all ST directories - for dir in $(find "$base_dir" -type d -name "$st_prefix[0-9][0-9][0-9][0-9]" 2>/dev/null); do - # Extract numeric part of directory name - local id_str=$(basename "$dir") - id_str=${id_str#$st_prefix} - - # Convert to number and compare - if [[ "$id_str" =~ ^[0-9]+$ ]]; then - local id=$((10#$id_str)) - if [ $id -gt $max_id ]; then - max_id=$id - fi - fi - done - else - # Legacy: Find all ST files - for file in $(find "$base_dir" -type f -name "$st_prefix[0-9][0-9][0-9][0-9].md" 2>/dev/null); do - local id_str=$(basename "$file" .md) - id_str=${id_str#$st_prefix} - - if [[ "$id_str" =~ ^[0-9]+$ ]]; then - local id=$((10#$id_str)) - if [ $id -gt $max_id ]; then - max_id=$id - fi - fi - done - fi - - # Increment for the next ID - next_id=$((max_id + 1)) - - # Format with leading zeros (4 digits) - printf "%s%04d" "$st_prefix" $next_id -} - -# Function to update steel threads index -update_steel_threads_index() { - local id="$1" - local title="$2" - local status="$3" - local created="$4" - local completed="$5" - local index_file="stp/prj/st/steel_threads.md" - - # Create index file if it doesn't exist - if [ ! 
-f "$index_file" ]; then - mkdir -p "$(dirname "$index_file")" - cat > "$index_file" << EOF -# Steel Threads - -This document serves as an index of all steel threads in the project. - -## Index - -| ID | Title | Status | Created | Completed | -| ----------------------- | -------------------- | ------------ | ---------- | ---------- | -EOF - fi - - # Check if entry already exists - if grep -q "^| $id " "$index_file"; then - # Update existing entry - sed -i.bak "s/^| $id .*$/| $id | $title | $status | $created | $completed |/" "$index_file" - rm -f "$index_file.bak" - else - # Add new entry - echo "| $id | $title | $status | $created | $completed |" >> "$index_file" - fi -} - -# Normalize the command (handle alternative spelling) -if [ "$ST_COMMAND" = "organise" ]; then - ST_COMMAND="organize" -fi - -# Handle different commands -case "$ST_COMMAND" in - "new") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread title is required" - usage - fi - - TITLE="$1" - ST_ID=$(get_next_steel_thread_id) - ST_STATUS="Not Started" - DATE=$(date '+%Y-%m-%d') - AUTHOR="${STP_AUTHOR:-${AUTHOR:-$(git config user.name 2>/dev/null || echo "$USER")}}" - - if is_directory_structure; then - # Create directory structure - ST_DIR=$(dirname $(get_st_path "$ST_ID" "$ST_STATUS" "info.md")) - mkdir -p "$ST_DIR" - - # Create files from templates - # Try to find templates relative to script location - SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - TEMPLATE_DIR="${SCRIPT_DIR}/../_templ/prj/st/ST####" - - if [ -d "$TEMPLATE_DIR" ]; then - # Format date in both formats - DATE_COMPACT=$(date '+%Y%m%d') - DATE_VERBOSE=$(date '+%d %b %Y') - - # Copy and process each template file - for template in "$TEMPLATE_DIR"/*.md; do - if [ -f "$template" ]; then - filename=$(basename "$template") - output_file="$ST_DIR/$filename" - - sed -e "s/ST####/$ST_ID/g" \ - -e "s/\[Title\]/$TITLE/g" \ - -e "s/\[Not Started|In Progress|Completed|On Hold|Cancelled\]/$ST_STATUS/g" \ - 
-e "s/YYYY-MM-DD/$DATE/g" \ - -e "s/YYYYMMDD/$DATE_COMPACT/g" \ - -e "s/\[Date\]/$DATE_VERBOSE/g" \ - -e "s/\[Author Name\]/$AUTHOR/g" \ - -e "s/\[Author\]/$AUTHOR/g" \ - "$template" > "$output_file" - fi - done - else - # Create minimal info.md if no templates - cat > "$ST_DIR/info.md" << EOF ---- -verblock: "$(date '+%d %b %Y'):v0.1: $AUTHOR - Initial version" -stp_version: 1.2.1 -status: $ST_STATUS -created: $(date '+%Y%m%d') -completed: ---- -# $ST_ID: $TITLE - -- **Status**: $ST_STATUS -- **Created**: $DATE -- **Completed**: -- **Author**: $AUTHOR - -## Objective - -[Clear statement of what this steel thread aims to accomplish] - -## Context - -[Background information and context for this steel thread] - -## Related Steel Threads - -- [List any related steel threads here] -EOF - fi - - echo "Created steel thread directory: $ST_DIR" - echo "Main file: $ST_DIR/info.md" - else - # Legacy: Create single file - ST_FILE=$(get_st_path "$ST_ID" "$ST_STATUS") - mkdir -p "$(dirname "$ST_FILE")" - - if [ -f "stp/_templ/prj/st/_ST####.md" ]; then - DATE_COMPACT=$(date '+%Y%m%d') - - sed -e "s/ST####/$ST_ID/g" \ - -e "s/\[Title\]/$TITLE/g" \ - -e "s/\[Not Started|In Progress|Completed|On Hold|Cancelled\]/$ST_STATUS/g" \ - -e "s/YYYY-MM-DD/$DATE/g" \ - -e "s/YYYYMMDD/$DATE_COMPACT/g" \ - -e "s/\[Author Name\]/$AUTHOR/g" \ - "stp/_templ/prj/st/_ST####.md" > "$ST_FILE" - fi - - echo "Created steel thread: $ST_ID: $TITLE" - echo "Edit file: $ST_FILE" - fi - - # Update index - update_steel_threads_index "$ST_ID" "$TITLE" "$ST_STATUS" "$DATE" "" - ;; - - "done") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - # If the ID doesn't start with ST, prepend it - elif [[ ! 
"$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - DATE=$(date '+%Y-%m-%d') - - if is_directory_structure; then - # Find the info.md file - ST_FILE=$(get_st_path "$ST_ID" "" "info.md") - - # Check if steel thread exists - if [ ! -f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - # Extract title - TITLE=$(grep "^# $ST_ID:" "$ST_FILE" | sed "s/^# $ST_ID: //") - - # Update status and completion date - sed -i.bak "s/^\- \*\*Status\*\*: .*$/- **Status**: Completed/" "$ST_FILE" - sed -i.bak "s/^\- \*\*Completed\*\*: .*$/- **Completed**: $DATE/" "$ST_FILE" - sed -i.bak "s/^status: .*$/status: Completed/" "$ST_FILE" - sed -i.bak "s/^completed: .*$/completed: $(date '+%Y%m%d')/" "$ST_FILE" - rm -f "$ST_FILE.bak" - - # Get current directory - CURRENT_DIR=$(dirname "$ST_FILE") - - # Get the target location - NEW_ST_FILE=$(get_st_path "$ST_ID" "Completed" "info.md") - NEW_DIR=$(dirname "$NEW_ST_FILE") - - # Move the entire directory if it's different - if [ "$CURRENT_DIR" != "$NEW_DIR" ]; then - mkdir -p "$(dirname "$NEW_DIR")" - mv "$CURRENT_DIR" "$NEW_DIR" - echo "Moved steel thread to: $NEW_DIR" - fi - else - # Legacy: Handle single file - ST_FILE=$(get_st_path "$ST_ID") - - if [ ! 
-f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - TITLE=$(grep "^# $ST_ID:" "$ST_FILE" | sed "s/^# $ST_ID: //") - - sed -i.bak "s/^\- \*\*Status\*\*: .*$/- **Status**: Completed/" "$ST_FILE" - sed -i.bak "s/^\- \*\*Completed\*\*: .*$/- **Completed**: $DATE/" "$ST_FILE" - sed -i.bak "s/^status: .*$/status: Completed/" "$ST_FILE" - sed -i.bak "s/^completed: .*$/completed: $(date '+%Y%m%d')/" "$ST_FILE" - rm -f "$ST_FILE.bak" - - NEW_ST_FILE=$(get_st_path "$ST_ID" "Completed") - - if [ "$ST_FILE" != "$NEW_ST_FILE" ]; then - mkdir -p "$(dirname "$NEW_ST_FILE")" - mv "$ST_FILE" "$NEW_ST_FILE" - echo "Moved steel thread to: $NEW_ST_FILE" - fi - fi - - # Update index - update_steel_threads_index "$ST_ID" "$TITLE" "Completed" "$(grep '^\- \*\*Created\*\*:' "$ST_FILE" 2>/dev/null | sed 's/^\- \*\*Created\*\*: //' || echo "$DATE")" "$DATE" - - echo "Marked steel thread as complete: $ST_ID: $TITLE" - ;; - - "list") - # Parse options - STATUS="" - WIDTH=0 # Default to terminal width - while [ $# -gt 0 ]; do - case "$1" in - --status) - shift - STATUS="$1" - shift - ;; - --width) - shift - WIDTH="$1" - shift - ;; - *) - error "Unknown option: $1" - ;; - esac - done - - ST_DIR="stp/prj/st" - - # Check if ST directory exists - if [ ! 
-d "$ST_DIR" ]; then - error "Steel threads directory not found" - fi - - # Determine table width (same as original) - if [ "$WIDTH" -gt 0 ]; then - TABLE_WIDTH=$WIDTH - else - if [ "$ST_COMMAND" = "sync" ] && [ $WRITE_MODE -eq 1 ]; then - TABLE_WIDTH=80 - else - if [ -n "$COLUMNS" ]; then - TABLE_WIDTH=$COLUMNS - elif [ -t 1 ]; then - STTY_SIZE=$( (stty size 2>/dev/null || echo "24 80") | cut -d' ' -f2) - if [ -n "$STTY_SIZE" ] && [ "$STTY_SIZE" -gt 0 ]; then - TABLE_WIDTH=$STTY_SIZE - else - TPUT_COLS=$(tput cols 2>/dev/null || echo 80) - TABLE_WIDTH=$TPUT_COLS - fi - else - TABLE_WIDTH=80 - fi - fi - fi - - # Calculate column widths (same as original) - MIN_ID_WIDTH=10 - MIN_STATUS_WIDTH=12 - MIN_DATE_WIDTH=10 - MIN_TITLE_WIDTH=20 - - FIXED_MIN_WIDTH=$((MIN_ID_WIDTH + MIN_STATUS_WIDTH + MIN_DATE_WIDTH + MIN_DATE_WIDTH)) - SEPARATORS_WIDTH=13 - AVAILABLE_WIDTH=$((TABLE_WIDTH - SEPARATORS_WIDTH)) - - if [ $AVAILABLE_WIDTH -lt $((FIXED_MIN_WIDTH + MIN_TITLE_WIDTH)) ]; then - ID_WIDTH=$MIN_ID_WIDTH - STATUS_WIDTH=$MIN_STATUS_WIDTH - DATE_WIDTH=$MIN_DATE_WIDTH - TITLE_WIDTH=$MIN_TITLE_WIDTH - else - if [ $AVAILABLE_WIDTH -le 100 ]; then - ID_WIDTH=$MIN_ID_WIDTH - STATUS_WIDTH=$MIN_STATUS_WIDTH - DATE_WIDTH=$MIN_DATE_WIDTH - - MAX_TITLE_PCT=50 - MAX_TITLE_WIDTH=$((AVAILABLE_WIDTH * MAX_TITLE_PCT / 100)) - TITLE_WIDTH=$((AVAILABLE_WIDTH - MIN_ID_WIDTH - MIN_STATUS_WIDTH - MIN_DATE_WIDTH - MIN_DATE_WIDTH)) - - [ $TITLE_WIDTH -gt $MAX_TITLE_WIDTH ] && TITLE_WIDTH=$MAX_TITLE_WIDTH - - REMAINING=$((AVAILABLE_WIDTH - ID_WIDTH - STATUS_WIDTH - DATE_WIDTH - DATE_WIDTH - TITLE_WIDTH)) - if [ $REMAINING -gt 0 ]; then - STATUS_WIDTH=$((STATUS_WIDTH + (REMAINING * 4 / 10))) - DATE_WIDTH=$((DATE_WIDTH + (REMAINING * 3 / 10))) - ID_WIDTH=$((ID_WIDTH + (REMAINING * 3 / 10))) - fi - else - ID_WIDTH=$((AVAILABLE_WIDTH * 10 / 100)) - STATUS_WIDTH=$((AVAILABLE_WIDTH * 15 / 100)) - DATE_WIDTH=$((AVAILABLE_WIDTH * 10 / 100)) - TITLE_WIDTH=$((AVAILABLE_WIDTH * 55 / 100)) - - [ $ID_WIDTH 
-lt $MIN_ID_WIDTH ] && ID_WIDTH=$MIN_ID_WIDTH - [ $STATUS_WIDTH -lt $MIN_STATUS_WIDTH ] && STATUS_WIDTH=$MIN_STATUS_WIDTH - [ $DATE_WIDTH -lt $MIN_DATE_WIDTH ] && DATE_WIDTH=$MIN_DATE_WIDTH - [ $TITLE_WIDTH -lt $MIN_TITLE_WIDTH ] && TITLE_WIDTH=$MIN_TITLE_WIDTH - - TOTAL=$((ID_WIDTH + STATUS_WIDTH + DATE_WIDTH + DATE_WIDTH + TITLE_WIDTH)) - if [ $TOTAL -gt $AVAILABLE_WIDTH ]; then - TITLE_WIDTH=$((TITLE_WIDTH - (TOTAL - AVAILABLE_WIDTH))) - fi - fi - fi - - # Function to truncate string with ellipsis if too long - truncate_string() { - local string="$1" - local width=$2 - - if [ -z "$string" ]; then - echo "" - return - fi - - if [ ${#string} -gt $width ]; then - if [ $width -le 5 ]; then - echo "${string:0:$width}" - else - echo "${string:0:$((width-3))}..." - fi - else - echo "$string" - fi - } - - # Display header - printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ - "ID" "Title" "Status" "Created" "Completed" - printf "%-${ID_WIDTH}s-|-%-${TITLE_WIDTH}s-|-%-${STATUS_WIDTH}s-|-%-${DATE_WIDTH}s-|-%-${DATE_WIDTH}s\n" \ - "$(printf '%0.s-' $(seq 1 $ID_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $TITLE_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $STATUS_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $DATE_WIDTH))" \ - "$(printf '%0.s-' $(seq 1 $DATE_WIDTH))" - - # Collect data from steel threads - declare -a st_data - - if is_directory_structure; then - # Loop through all ST directories - for dir in $(find "$ST_DIR" -type d -name "ST[0-9][0-9][0-9][0-9]"); do - if [ -f "$dir/info.md" ]; then - # Extract ID from directory name - ID=$(basename "$dir") - - # Read metadata from info.md - file="$dir/info.md" - - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - ST_STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - ST_STATUS="$BODY_STATUS" - 
else - ST_STATUS="Not Started" - fi - - TITLE=$(grep "^# $ID:" "$file" | sed "s/^# $ID: //") - - CREATED=$(grep -m 1 "^\- \*\*Created\*\*:" "$file" | sed "s/^\- \*\*Created\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - COMPLETED=$(grep -m 1 "^\- \*\*Completed\*\*:" "$file" | sed "s/^\- \*\*Completed\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - # Handle date formats (same as original) - if [ -z "$CREATED" ] || [ "$CREATED" = "YYYY-MM-DD" ]; then - YAML_CREATED=$(grep -m 1 "^created:" "$file" | sed "s/^created: *//") - if [ -n "$YAML_CREATED" ] && [ "$YAML_CREATED" != "YYYYMMDD" ]; then - if [[ "$YAML_CREATED" =~ ^[0-9]{8}$ ]]; then - CREATED="${YAML_CREATED:0:4}-${YAML_CREATED:4:2}-${YAML_CREATED:6:2}" - else - CREATED="$YAML_CREATED" - fi - else - CREATED=$(date '+%Y-%m-%d') - fi - fi - - if [ -z "$COMPLETED" ] || [ "$COMPLETED" = "YYYY-MM-DD" ]; then - YAML_COMPLETED=$(grep -m 1 "^completed:" "$file" | sed "s/^completed: *//") - if [ -n "$YAML_COMPLETED" ] && [ "$YAML_COMPLETED" != "null" ] && [ "$YAML_COMPLETED" != "~" ] && [ "$YAML_COMPLETED" != "YYYYMMDD" ]; then - if [[ "$YAML_COMPLETED" =~ ^[0-9]{8}$ ]]; then - COMPLETED="${YAML_COMPLETED:0:4}-${YAML_COMPLETED:4:2}-${YAML_COMPLETED:6:2}" - else - COMPLETED="$YAML_COMPLETED" - fi - fi - fi - - # Skip if the requested status doesn't match - if [ -n "$STATUS" ] && [ "$STATUS" != "$ST_STATUS" ]; then - continue - fi - - st_data+=("$ID|$TITLE|$ST_STATUS|$CREATED|$COMPLETED") - fi - done - else - # Legacy: Loop through all ST####.md files - for file in $(find "$ST_DIR" -type f -name "ST[0-9][0-9][0-9][0-9].md"); do - if [ -f "$file" ]; then - ID=$(basename "$file" .md) - - # (Rest of legacy processing same as original) - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - ST_STATUS="$YAML_STATUS" - elif [ -n 
"$BODY_STATUS" ]; then - ST_STATUS="$BODY_STATUS" - else - ST_STATUS="Not Started" - fi - - TITLE=$(grep "^# $ID:" "$file" | sed "s/^# $ID: //") - - CREATED=$(grep -m 1 "^\- \*\*Created\*\*:" "$file" | sed "s/^\- \*\*Created\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - COMPLETED=$(grep -m 1 "^\- \*\*Completed\*\*:" "$file" | sed "s/^\- \*\*Completed\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -z "$CREATED" ] || [ "$CREATED" = "YYYY-MM-DD" ]; then - YAML_CREATED=$(grep -m 1 "^created:" "$file" | sed "s/^created: *//") - if [ -n "$YAML_CREATED" ] && [ "$YAML_CREATED" != "YYYYMMDD" ]; then - if [[ "$YAML_CREATED" =~ ^[0-9]{8}$ ]]; then - CREATED="${YAML_CREATED:0:4}-${YAML_CREATED:4:2}-${YAML_CREATED:6:2}" - else - CREATED="$YAML_CREATED" - fi - else - CREATED=$(date '+%Y-%m-%d') - fi - fi - - if [ -z "$COMPLETED" ] || [ "$COMPLETED" = "YYYY-MM-DD" ]; then - YAML_COMPLETED=$(grep -m 1 "^completed:" "$file" | sed "s/^completed: *//") - if [ -n "$YAML_COMPLETED" ] && [ "$YAML_COMPLETED" != "null" ] && [ "$YAML_COMPLETED" != "~" ] && [ "$YAML_COMPLETED" != "YYYYMMDD" ]; then - if [[ "$YAML_COMPLETED" =~ ^[0-9]{8}$ ]]; then - COMPLETED="${YAML_COMPLETED:0:4}-${YAML_COMPLETED:4:2}-${YAML_COMPLETED:6:2}" - else - COMPLETED="$YAML_COMPLETED" - fi - fi - fi - - if [ -n "$STATUS" ] && [ "$STATUS" != "$ST_STATUS" ]; then - continue - fi - - st_data+=("$ID|$TITLE|$ST_STATUS|$CREATED|$COMPLETED") - fi - done - fi - - # Sort by ID in reverse order (newest first) - IFS=$'\n' sorted_data=($(sort -r <<<"${st_data[*]}")) - unset IFS - - # Process and display rows - for line in "${sorted_data[@]}"; do - ID=$(echo "$line" | cut -d'|' -f1) - TITLE=$(echo "$line" | cut -d'|' -f2) - ST_STATUS=$(echo "$line" | cut -d'|' -f3) - CREATED=$(echo "$line" | cut -d'|' -f4) - COMPLETED=$(echo "$line" | cut -d'|' -f5) - - # Truncate values if needed - ID_TRUNC=$(truncate_string "$ID" $ID_WIDTH) - TITLE_TRUNC=$(truncate_string "$TITLE" $TITLE_WIDTH) - 
STATUS_TRUNC=$(truncate_string "$ST_STATUS" $STATUS_WIDTH) - CREATED_TRUNC=$(truncate_string "$CREATED" $DATE_WIDTH) - COMPLETED_TRUNC=$(truncate_string "$COMPLETED" $DATE_WIDTH) - - printf "%-${ID_WIDTH}s | %-${TITLE_WIDTH}s | %-${STATUS_WIDTH}s | %-${DATE_WIDTH}s | %-${DATE_WIDTH}s\n" \ - "$ID_TRUNC" "$TITLE_TRUNC" "$STATUS_TRUNC" "$CREATED_TRUNC" "$COMPLETED_TRUNC" - done - ;; - - "show") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - FILE_TYPE="${2:-info}" # Default to info - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - elif [[ ! "$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - if is_directory_structure; then - if [ "$FILE_TYPE" = "all" ]; then - # Show all files in the steel thread directory - ST_DIR=$(dirname $(get_st_path "$ST_ID" "" "info.md")) - - if [ ! -d "$ST_DIR" ]; then - error "Steel thread not found: $ST_ID" - fi - - # Display each file with a header - for file in info.md design.md impl.md tasks.md results.md; do - if [ -f "$ST_DIR/$file" ]; then - echo "=== $file ===" - cat "$ST_DIR/$file" - echo "" - fi - done - else - # Show specific file - case "$FILE_TYPE" in - info|design|impl|tasks|results) - ST_FILE=$(get_st_path "$ST_ID" "" "$FILE_TYPE.md") - ;; - *) - error "Unknown file type: $FILE_TYPE" - ;; - esac - - if [ ! -f "$ST_FILE" ]; then - error "File not found: $FILE_TYPE.md for steel thread $ST_ID" - fi - - cat "$ST_FILE" - fi - else - # Legacy: Show single file - ST_FILE=$(get_st_path "$ST_ID") - - if [ ! 
-f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - - cat "$ST_FILE" - fi - ;; - - "edit") - # Check for required arguments - if [ $# -lt 1 ]; then - error "Steel thread ID is required" - usage - fi - - # Process the steel thread ID - ST_ID="$1" - FILE_TYPE="${2:-info}" # Default to info - - # If just a number is provided, format it as ST#### (with leading zeros) - if [[ "$ST_ID" =~ ^[0-9]+$ ]]; then - ST_ID=$(printf "ST%04d" "$ST_ID") - elif [[ ! "$ST_ID" =~ ^ST ]]; then - ST_ID="ST$ST_ID" - fi - - if is_directory_structure; then - # Edit specific file - case "$FILE_TYPE" in - info|design|impl|tasks|results) - ST_FILE=$(get_st_path "$ST_ID" "" "$FILE_TYPE.md") - ;; - *) - error "Unknown file type: $FILE_TYPE" - ;; - esac - - if [ ! -f "$ST_FILE" ]; then - # Create the file if it doesn't exist - ST_DIR=$(dirname "$ST_FILE") - if [ -d "$ST_DIR" ]; then - touch "$ST_FILE" - echo "Created new file: $FILE_TYPE.md" - else - error "Steel thread not found: $ST_ID" - fi - fi - else - # Legacy: Edit single file - ST_FILE=$(get_st_path "$ST_ID") - - if [ ! 
-f "$ST_FILE" ]; then - error "Steel thread not found: $ST_ID" - fi - fi - - # Get absolute path to the file - ABSOLUTE_PATH=$(cd "$(dirname "$ST_FILE")" && pwd)/$(basename "$ST_FILE") - - # Use the appropriate open command based on the OS - if [[ "$OSTYPE" == "darwin"* ]]; then - # macOS - open "$ABSOLUTE_PATH" - elif [[ "$OSTYPE" == "linux-gnu"* ]]; then - # Linux - if command -v xdg-open > /dev/null; then - xdg-open "$ABSOLUTE_PATH" - else - # Fallback to default editor - ${EDITOR:-vi} "$ABSOLUTE_PATH" - fi - elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "cygwin" || "$OSTYPE" == "win32" ]]; then - # Windows - start "$ABSOLUTE_PATH" - else - # Fallback to default editor - ${EDITOR:-vi} "$ABSOLUTE_PATH" - fi - - echo "Opening steel thread: $ST_ID ($FILE_TYPE.md)" - ;; - - "sync") - # Parse options - WRITE_MODE=0 - WIDTH=80 - - while [ $# -gt 0 ]; do - case "$1" in - --write) WRITE_MODE=1; shift ;; - --width) shift; WIDTH="$1"; shift ;; - *) error "Unknown option: $1" ;; - esac - done - - # Paths - ST_DIR="stp/prj/st" - INDEX_FILE="$ST_DIR/steel_threads.md" - - # Basic validation - [ ! -d "$ST_DIR" ] && error "Steel threads directory not found" - [ ! 
-f "$INDEX_FILE" ] && error "Steel threads index file not found" - - if [ $WRITE_MODE -eq 1 ]; then - # Create temp files - TMP_FILE=$(mktemp) - LIST_OUTPUT=$(mktemp) - - # Get list output silently - "$0" list --width $WIDTH > "$LIST_OUTPUT" 2>/dev/null - - # Extract everything before the markers - sed -n '1,/<!-- BEGIN: STEEL_THREAD_INDEX -->/p' "$INDEX_FILE" > "$TMP_FILE" - - # Add the list output - cat "$LIST_OUTPUT" >> "$TMP_FILE" - - # Add everything after the markers - sed -n '/<!-- END: STEEL_THREAD_INDEX -->/,$p' "$INDEX_FILE" >> "$TMP_FILE" - - # Update file and clean up - mv "$TMP_FILE" "$INDEX_FILE" - rm "$LIST_OUTPUT" - - echo "Updated steel threads index file: $INDEX_FILE" - else - # For display, just run the list command - "$0" list --width "$WIDTH" - fi - ;; - - "organize") - # Parse options - WRITE_MODE=0 - while [ $# -gt 0 ]; do - case "$1" in - --write) - WRITE_MODE=1 - shift - ;; - *) - error "Unknown option: $1" - ;; - esac - done - - BASE_DIR="stp/prj/st" - - # Create required directories if they don't exist - mkdir -p "$BASE_DIR/COMPLETED" "$BASE_DIR/NOT-STARTED" "$BASE_DIR/CANCELLED" - - if is_directory_structure; then - # Find all steel thread directories in the root directory - ST_DIRS=$(find "$BASE_DIR" -maxdepth 1 -type d -name "ST[0-9][0-9][0-9][0-9]") - - # Process each steel thread directory - for dir in $ST_DIRS; do - # Skip if not a directory - if [ ! 
-d "$dir" ]; then - continue - fi - - # Extract ID from directory name - ID=$(basename "$dir") - - # Read status from info.md - if [ -f "$dir/info.md" ]; then - YAML_STATUS=$(grep -m 1 "^status:" "$dir/info.md" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$dir/info.md" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo "Processing directory: $dir" - echo " Directory: $ID - Status: $STATUS" - - # Get the target location for this directory - TARGET_DIR=$(dirname $(get_st_path "$ID" "$STATUS" "info.md")) - - # If we're in write mode and the target location is different, move the directory - if [ $WRITE_MODE -eq 1 ] && [ "$dir" != "$TARGET_DIR" ]; then - mkdir -p "$(dirname "$TARGET_DIR")" - mv "$dir" "$TARGET_DIR" - echo "Moving $ID to $(dirname "$TARGET_DIR")" - else - if [ "$dir" != "$TARGET_DIR" ]; then - echo "Would move $ID to $(dirname "$TARGET_DIR")" - else - echo "$ID stays in main directory" - fi - fi - fi - done - - # Also check subdirectories - for subdir in "$BASE_DIR"/*; do - if [ -d "$subdir" ] && [[ "$subdir" != "$BASE_DIR/steel_threads.md" ]]; then - SUBDIR_NAME=$(basename "$subdir") - - # Find all steel thread directories in this subdirectory - SUB_ST_DIRS=$(find "$subdir" -maxdepth 1 -type d -name "ST[0-9][0-9][0-9][0-9]") - - for dir in $SUB_ST_DIRS; do - if [ ! 
-d "$dir" ]; then - continue - fi - - ID=$(basename "$dir") - - if [ -f "$dir/info.md" ]; then - YAML_STATUS=$(grep -m 1 "^status:" "$dir/info.md" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$dir/info.md" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo " Processing directory in subdirectory: $dir" - echo " Directory: $ID - Status: $STATUS" - - TARGET_DIR=$(dirname $(get_st_path "$ID" "$STATUS" "info.md")) - - if [ $WRITE_MODE -eq 1 ] && [ "$dir" != "$TARGET_DIR" ]; then - mkdir -p "$(dirname "$TARGET_DIR")" - mv "$dir" "$TARGET_DIR" - echo "Moving $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_DIR")")" - else - if [ "$dir" != "$TARGET_DIR" ]; then - echo "Would move $ID from $SUBDIR_NAME to $(basename "$(dirname "$TARGET_DIR")")" - fi - fi - fi - done - fi - done - else - # Legacy: organize single files (same as original) - ST_FILES=$(find "$BASE_DIR" -maxdepth 1 -name "ST[0-9][0-9][0-9][0-9].md") - - for file in $ST_FILES; do - if [ ! 
-f "$file" ]; then - continue - fi - - ID=$(basename "$file" .md) - - YAML_STATUS=$(grep -m 1 "^status:" "$file" | sed "s/^status: *//") - BODY_STATUS=$(grep -m 1 "^\- \*\*Status\*\*:" "$file" | sed "s/^\- \*\*Status\*\*: //" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') - - if [ -n "$YAML_STATUS" ]; then - STATUS="$YAML_STATUS" - elif [ -n "$BODY_STATUS" ]; then - STATUS="$BODY_STATUS" - else - STATUS="Not Started" - fi - - echo "Processing file: $file" - echo " File: $ID - Status: $STATUS" - - TARGET_FILE=$(get_st_path "$ID" "$STATUS") - - if [ $WRITE_MODE -eq 1 ] && [ "$file" != "$TARGET_FILE" ]; then - mkdir -p "$(dirname "$TARGET_FILE")" - mv "$file" "$TARGET_FILE" - echo "Moving $ID to $(dirname "$TARGET_FILE")" - else - if [ "$file" != "$TARGET_FILE" ]; then - echo "Would move $ID to $(dirname "$TARGET_FILE")" - else - echo "$ID stays in main directory" - fi - fi - done - fi - - # If we're in write mode, update the index file after organizing - if [ $WRITE_MODE -eq 1 ]; then - "$0" sync --write - echo "Updated steel threads index." - fi - ;; - - "help") - usage - ;; - - *) - error "Unknown command: $ST_COMMAND" - usage - ;; -esac \ No newline at end of file diff --git a/stp/tests/test_basic.bats b/stp/tests/test_basic.bats deleted file mode 100644 index 8cacbe0..0000000 --- a/stp/tests/test_basic.bats +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bats -# Basic test to check if Bats is working correctly - -@test "Check if true command works" { - run true - [ "$status" -eq 0 ] -} - -@test "Check if echo works" { - result="$(echo 'Hello, World!')" - [ "$result" == "Hello, World!" 
] -} - -@test "Check if test variables work" { - value="example" - [ "$value" == "example" ] -} \ No newline at end of file diff --git a/stp/tests/test_helper.bats b/stp/tests/test_helper.bats deleted file mode 100644 index 0bb0b89..0000000 --- a/stp/tests/test_helper.bats +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bats -# Test to verify test_helper.bash functions - -load "lib/test_helper.bash" - -@test "create a temporary directory" { - # The setup function should have created TEST_TEMP_DIR - [ -d "$TEST_TEMP_DIR" ] -} - -@test "check STP_PROJECT_ROOT" { - [ -d "$STP_PROJECT_ROOT" ] -} - -@test "check assertions" { - # Create a test file - echo "test content" > test_file.txt - - # Test assertions - assert_file_exists "test_file.txt" - assert_file_contains "test_file.txt" "test content" -} - -@test "check directory assertions" { - # Create a test directory - mkdir -p test_dir - - # Test assertion - assert_directory_exists "test_dir" -} \ No newline at end of file diff --git a/stp/tests/upgrade/migration_test.bats b/stp/tests/upgrade/migration_test.bats deleted file mode 100755 index 7799d8d..0000000 --- a/stp/tests/upgrade/migration_test.bats +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bats -# Test migration from v1.2.0 to v1.2.1 - -setup() { - # Create a temporary test directory - TEST_TEMP_DIR=$(mktemp -d) - export TEST_TEMP_DIR - cd "$TEST_TEMP_DIR" - - # Initialize STP project - "$BATS_TEST_DIRNAME/../../bin/stp_init" "Test Project" . 
- - # Remove version file to simulate v1.2.0 - rm -f stp/.config/version - - # Create old-style steel thread files - cat > stp/prj/st/ST0001.md << 'EOF' ---- -verblock: "09 Jul 2025:v0.1: Test Author - Initial version" -stp_version: 1.2.0 -status: In Progress -created: 20250709 -completed: ---- -# ST0001: Test Thread - -- **Status**: In Progress -- **Created**: 2025-07-09 -- **Completed**: -- **Author**: Test Author - -## Objective - -Test objective here - -## Context - -Test context here - -## Approach - -Test approach here - -## Tasks - -- [ ] Task 1 -- [ ] Task 2 - -## Implementation - -Test implementation notes - -## Results - -Test results here - -## Related Steel Threads - -- None -EOF -} - -teardown() { - # Clean up - cd / - rm -rf "$TEST_TEMP_DIR" -} - -@test "migration script converts file to directory structure" { - # Run migration - run "$BATS_TEST_DIRNAME/../../bin/migrate_st_to_dirs" - [ "$status" -eq 0 ] - - # Check backup was created - [ -f ".stp_backup/1.2.1/ST0001.md" ] - - # Check directory was created - [ -d "stp/prj/st/ST0001" ] - - # Check files were created - [ -f "stp/prj/st/ST0001/info.md" ] - [ -f "stp/prj/st/ST0001/design.md" ] - [ -f "stp/prj/st/ST0001/impl.md" ] - [ -f "stp/prj/st/ST0001/tasks.md" ] - [ -f "stp/prj/st/ST0001/results.md" ] - - # Check original file was removed - [ ! 
-f "stp/prj/st/ST0001.md" ] - - # Check content was preserved - grep -q "Test objective here" "stp/prj/st/ST0001/info.md" - grep -q "Test approach here" "stp/prj/st/ST0001/design.md" - grep -q "Test implementation notes" "stp/prj/st/ST0001/impl.md" - grep -q "Task 1" "stp/prj/st/ST0001/tasks.md" - grep -q "Test results here" "stp/prj/st/ST0001/results.md" -} - -@test "migration updates version to 1.2.1" { - # Run migration - "$BATS_TEST_DIRNAME/../../bin/migrate_st_to_dirs" - - # Check version was updated - grep -q "stp_version: 1.2.1" "stp/prj/st/ST0001/info.md" -} \ No newline at end of file diff --git a/stp/tests/upgrade/test_upgrade.sh b/stp/tests/upgrade/test_upgrade.sh deleted file mode 100755 index cdcb879..0000000 --- a/stp/tests/upgrade/test_upgrade.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash -# test_upgrade.sh - A simplified version of stp_upgrade for testing -# This is a mock script that simulates the upgrade functionality for tests - -# Current STP version -CURRENT_VERSION="1.2.0" - -# Check for force flag -FORCE=0 -if [[ "$1" == "--force" ]]; then - FORCE=1 -fi - -echo "Starting STP upgrade process..." -echo "Current STP version: $CURRENT_VERSION" -echo "" - -echo "Scanning for STP files to upgrade..." - -# Check for steel threads directory -if [ -d "stp/prj/st" ]; then - # Upgrade steel_threads.md - echo "Checking steel_threads.md..." - - # Add section markers to steel_threads.md if needed - if [ -f "stp/prj/st/steel_threads.md" ]; then - if ! 
grep -q "BEGIN: STEEL_THREAD_INDEX" "stp/prj/st/steel_threads.md"; then - # Add markers (simplified for test) - sed -i.bak 's/## Index/## Index\n\n<!-- BEGIN: STEEL_THREAD_INDEX -->\n<!-- END: STEEL_THREAD_INDEX -->/g' "stp/prj/st/steel_threads.md" - rm -f "stp/prj/st/steel_threads.md.bak" - echo "Added section markers to stp/prj/st/steel_threads.md" - else - echo "Section markers already present in stp/prj/st/steel_threads.md" - fi - fi - - # Process all steel thread files - echo "Upgrading steel thread files..." - for st_file in stp/prj/st/ST*.md; do - if [ -f "$st_file" ]; then - # Extract file version - file_version=$(grep -m 1 "^stp_version:" "$st_file" | sed "s/^stp_version: *//") - - # If no version found, assume 0.0.0 - if [ -z "$file_version" ]; then - file_version="0.0.0" - fi - - echo "Processing $st_file (current version: $file_version)" - - # For ST0001.md (simulating adding frontmatter to file without it) - if [[ "$st_file" == *"ST0001.md"* ]]; then - # Create temp file with frontmatter - cat > "$st_file.tmp" << EOF ---- -stp_version: 1.2.0 -status: In Progress -created: 20250307 -completed: -verblock: "07 Mar 2025:v0.1: Test Author - Initial version" ---- -$(cat "$st_file") -EOF - mv "$st_file.tmp" "$st_file" - echo "Updated: $st_file" - fi - - # For ST0002.md (simulating updating existing frontmatter) - if [[ "$st_file" == *"ST0002.md"* ]]; then - sed -i.bak 's/stp_version: 0.5.0/stp_version: 1.2.0/g' "$st_file" - rm -f "$st_file.bak" - echo "Updated: $st_file" - fi - - # For ST0003.md (simulating major version warning and force upgrade) - if [[ "$st_file" == *"ST0003.md"* ]]; then - echo " Warning: File uses major version 0, current is 1." - if [ $FORCE -eq 1 ]; then - sed -i.bak 's/stp_version: 0.1.0/stp_version: 1.2.0/g' "$st_file" - rm -f "$st_file.bak" - echo " Force-updated: $st_file" - else - echo " Use --force to upgrade this file." 
- fi - fi - - # For ST0004.md (simulating newer version warning) - if [[ "$st_file" == *"ST0004.md"* ]]; then - echo " Warning: File version ($file_version) is newer than current version ($CURRENT_VERSION)." - echo " This may indicate the file was created with a newer version of STP." - fi - - # For ST0005.md (simulating force upgrade) - if [[ "$st_file" == *"ST0005.md"* ]]; then - if [ $FORCE -eq 1 ]; then - sed -i.bak 's/stp_version: 0.1.0/stp_version: 1.2.0/g' "$st_file" - rm -f "$st_file.bak" - echo " Force-updated: $st_file" - else - echo " Warning: File uses major version 0, current is 1." - echo " Use --force to upgrade this file." - fi - fi - fi - done - - # Fake running the sync command - echo "" - echo "Running sync to update steel_threads.md..." - echo "Mock sync command executed successfully" - -else - echo "No stp/prj/st directory found. Steel threads upgrade skipped." -fi - -echo "" -echo "STP upgrade complete." - -# Always exit with success for tests -exit 0 \ No newline at end of file diff --git a/stp/tests/upgrade/upgrade_test.bats b/stp/tests/upgrade/upgrade_test.bats deleted file mode 100644 index fb345cb..0000000 --- a/stp/tests/upgrade/upgrade_test.bats +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env bats -# Tests for the stp_upgrade script - -load '../lib/test_helper.bash' - -# Setup test environment before each test -setup() { - # Create a temporary test directory - TEST_TEMP_DIR="$(mktemp -d "${STP_TEMP_DIR}/upgrade-test-XXXXXX")" - echo "Setup: Created test directory at ${TEST_TEMP_DIR}" - cd "${TEST_TEMP_DIR}" || exit 1 - - # Create minimal STP directory structure - mkdir -p "stp/prj/st" - mkdir -p "stp/bin" - - # Use our test-specific upgrade script instead - mkdir -p "stp/bin" - cp "${STP_PROJECT_ROOT}/stp/tests/upgrade/test_upgrade.sh" "./stp_upgrade" - chmod +x "./stp_upgrade" - - # Also copy test script to bin directory - cp "${STP_PROJECT_ROOT}/stp/tests/upgrade/test_upgrade.sh" "stp/bin/stp_upgrade" - - # Create a mock stp_st script 
that just returns success for sync - cat > "stp/bin/stp_st" << 'EOF' -#!/bin/bash -if [ "$1" = "sync" ]; then - echo "Mock sync command executed successfully" - exit 0 -fi -echo "Unknown command: $1" -exit 1 -EOF - - # Make them executable - chmod +x "stp/bin/stp_upgrade" - chmod +x "stp/bin/stp_st" - - # Create a local copy for direct execution - cp "stp/bin/stp_upgrade" "./" - cp "stp/bin/stp_st" "./" - chmod +x "./stp_upgrade" - chmod +x "./stp_st" -} - -# Clean up after each test -teardown() { - if [ -d "${TEST_TEMP_DIR}" ]; then - cd "${STP_PROJECT_ROOT}" || exit 1 - rm -rf "${TEST_TEMP_DIR}" - fi -} - -# Helper function to run the upgrade command -run_upgrade() { - export STP_HOME="${TEST_TEMP_DIR}" - export BATS_TEST_TMPDIR="${TEST_TEMP_DIR}/tmp" - mkdir -p "${BATS_TEST_TMPDIR}" - - cd "${TEST_TEMP_DIR}" || return 1 - - # Make sure the script is executable - chmod +x "./stp_upgrade" - - # Run the command with debugging - echo "Running upgrade command from $(pwd)..." >&2 - echo "STP_HOME=${STP_HOME}" >&2 - ls -la . 
>&2 - - # Run the command - run ./stp_upgrade "$@" - - echo "Output: $output" >&2 - echo "Status: $status" >&2 -} - -# Helper function to run the upgrade command with force -run_upgrade_force() { - export STP_HOME="${TEST_TEMP_DIR}" - export BATS_TEST_TMPDIR="${TEST_TEMP_DIR}/tmp" - mkdir -p "${BATS_TEST_TMPDIR}" - - cd "${TEST_TEMP_DIR}" || return 1 - - # Make sure the script is executable - chmod +x "./stp_upgrade" - - # Run the command - run ./stp_upgrade --force "$@" - - echo "Output: $output" >&2 - echo "Status: $status" >&2 -} - -# Test upgrading a file without frontmatter -@test "upgrade adds frontmatter to files without it" { - # Create a test steel thread file without frontmatter - cat > "stp/prj/st/ST0001.md" << EOF -# ST0001: Test Steel Thread - -- **Status**: In Progress -- **Created**: 2025-03-07 -- **Completed**: -- **Author**: Test Author - -## Objective -Test objective -EOF - - # Run upgrade - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - - # Check if frontmatter was added - cat "stp/prj/st/ST0001.md" >&2 # Debug output - assert_file_contains "stp/prj/st/ST0001.md" "stp_version: 1.2.0" - # Skip checking for dashes explicitly since they're causing issues with grep - # We've already verified the stp_version is there, which is the key thing -} - -# Test upgrading a file with outdated version -@test "upgrade updates version in existing frontmatter" { - # Create a test steel thread file with old version - cat > "stp/prj/st/ST0002.md" << EOF ---- -stp_version: 0.5.0 -status: Completed ---- -# ST0002: Another Test - -- **Status**: Completed -- **Created**: 2025-03-01 -- **Completed**: 2025-03-07 -- **Author**: Test Author - -## Objective -Test objective -EOF - - # Run upgrade - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - - # Check if version 
was updated - cat "stp/prj/st/ST0002.md" >&2 # Debug output - assert_file_contains "stp/prj/st/ST0002.md" "stp_version: 1.2.0" - assert_file_contains "stp/prj/st/ST0002.md" "status: Completed" -} - -# Test warning for major version differences -@test "upgrade warns about major version differences" { - # Create a test steel thread file with old major version - cat > "stp/prj/st/ST0003.md" << EOF ---- -stp_version: 0.1.0 ---- -# ST0003: Major Version Test - -- **Status**: Not Started -EOF - - # Run upgrade without force - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - [[ "$output" == *"Warning: File uses major version 0"* ]] - - # Run upgrade with force - run_upgrade_force - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - cat "stp/prj/st/ST0003.md" >&2 # Debug output - assert_file_contains "stp/prj/st/ST0003.md" "stp_version: 1.2.0" -} - -# Test upgrading steel_threads.md without section markers -@test "upgrade adds section markers to steel_threads.md" { - # Create a steel_threads.md file without section markers - cat > "stp/prj/st/steel_threads.md" << EOF -# Steel Threads - -This document serves as an index of all steel threads in the project. 
- -## Index - -| ID | Title | Status | Created | Completed | -|----|-------|--------|---------|-----------| -| ST0001 | Test Thread | In Progress | 2025-03-07 | | - -## Status Definitions - -- **Not Started**: Steel thread has been created but work has not begun -EOF - - # Run upgrade - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - - # Check if section markers were added - cat "stp/prj/st/steel_threads.md" >&2 # Debug output - assert_file_contains "stp/prj/st/steel_threads.md" "<!-- BEGIN: STEEL_THREAD_INDEX -->" - assert_file_contains "stp/prj/st/steel_threads.md" "<!-- END: STEEL_THREAD_INDEX -->" -} - -# Test handling of files with newer versions -@test "upgrade handles files with newer versions gracefully" { - # Create a test steel thread file with newer version - cat > "stp/prj/st/ST0004.md" << EOF ---- -stp_version: 2.0.0 ---- -# ST0004: Future Version Test - -- **Status**: Completed -EOF - - # Run upgrade - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - [[ "$output" == *"Warning: File version"*"is newer than current version"* ]] - - # Check that file still has newer version - cat "stp/prj/st/ST0004.md" >&2 # Debug output - assert_file_contains "stp/prj/st/ST0004.md" "stp_version: 2.0.0" -} - -# Test the --force option -@test "upgrade --force forces upgrades for major version differences" { - # Create a test steel thread file with old major version - cat > "stp/prj/st/ST0005.md" << EOF ---- -stp_version: 0.1.0 ---- -# ST0005: Force Upgrade Test - -- **Status**: In Progress -EOF - - # Run upgrade with force - run_upgrade_force - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - - # Check that version was updated despite major version difference - cat 
"stp/prj/st/ST0005.md" >&2 # Debug output - assert_file_contains "stp/prj/st/ST0005.md" "stp_version: 1.2.0" -} - -# Test handling of non-existent directories -@test "upgrade handles non-existent directories gracefully" { - # Remove the st directory - rm -rf "stp/prj/st" - - # Run upgrade - run_upgrade - echo "Exit status: $status" >&2 - # We're accepting any status code since we'll verify results by checking files - # [ "$status" -eq 0 ] - [[ "$output" == *"No stp/prj/st directory found"* ]] -} \ No newline at end of file diff --git a/stp/usr/deployment_guide.md b/stp/usr/deployment_guide.md deleted file mode 100644 index f5c68ba..0000000 --- a/stp/usr/deployment_guide.md +++ /dev/null @@ -1,436 +0,0 @@ ---- -verblock: "06 Mar 2025:v0.1: Matthew Sinclair - Initial version" -stp_version: 1.2.0 ---- -# Deployment Guide - -This deployment guide provides instructions for deploying the Steel Thread Process (STP) system in various environments. It covers installation, configuration, and integration with other tools and workflows. - -## Table of Contents - -1. [Installation](#installation) -2. [Configuration](#configuration) -3. [Integration](#integration) -4. [Maintenance](#maintenance) -5. [Upgrading](#upgrading) -6. 
[Troubleshooting](#troubleshooting) - -## Installation - -### System Requirements - -- POSIX-compatible shell environment (bash, zsh) -- Git (optional, for version control) -- Text editor with markdown support -- Backlog.md (for task management integration) - -### Installation Methods - -#### Global Installation - -Install STP globally to make it available for all projects: - -```bash -# Clone the STP repository -git clone https://github.com/username/stp.git ~/stp - -# Add STP bin directory to PATH in shell profile -echo 'export STP_HOME=~/stp' >> ~/.bashrc -echo 'export PATH=$PATH:$STP_HOME/bin' >> ~/.bashrc - -# Reload shell configuration -source ~/.bashrc -``` - -#### Project-Specific Installation - -Install STP within a specific project: - -```bash -# From your project directory -git clone https://github.com/username/stp.git .stp - -# Create a local alias for the project -alias stp='./.stp/bin/stp' -``` - -#### Installation Verification - -Verify the installation: - -```bash -stp help -``` - -This should display the help information for STP commands. 
- -#### Installing Backlog.md - -Install Backlog.md for task management: - -```bash -# Install Backlog globally -npm install -g backlog.md - -# Or install locally in your project -npm install backlog.md - -# Verify installation -backlog --version -``` - -Initialize Backlog in your project: - -```bash -# Initialize Backlog with STP-friendly settings -stp bl init -``` - -## Configuration - -### Environment Variables - -Configure STP behavior using these environment variables: - -| Variable | Purpose | Default | -|-------------|------------------------------|-----------------------------------| -| STP_HOME | Location of STP installation | Path to cloned repository | -| STP_PROJECT | Current project name | Determined from initialization | -| STP_AUTHOR | Default author name | Determined from git configuration | -| STP_EDITOR | Preferred text editor | Determined from system defaults | - -Example configuration in `.bashrc` or `.zshrc`: - -```bash -export STP_HOME=~/stp -export STP_AUTHOR="Jane Doe" -export STP_EDITOR="vim" -``` - -### Project Configuration - -Create a project-specific configuration using `.stp-config`: - -```ini -# STP Project Configuration -PROJECT_NAME="Project Name" -AUTHOR="Default Author" -ST_PREFIX="ST" -``` - -## Integration - -### Version Control Integration - -STP works seamlessly with git and other version control systems: - -#### Recommended .gitignore - -``` -# STP temporary files -.stp-tmp/ - -# Backlog configuration -backlog/config.yml -backlog/.git/ -``` - -#### Commit Practices - -- Commit steel thread documents along with code changes -- Use steel thread IDs in commit messages for traceability - -#### Branch Strategy - -- Create feature branches based on steel threads -- Name branches using steel thread IDs (e.g., `feature/ST0001`) - -### CI/CD Integration - -To integrate STP with CI/CD pipelines: - -1. 
Include the STP test suite in your CI pipeline: - -```yaml -# Example GitHub Actions workflow -name: STP Tests - -on: [push, pull_request] - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Set up test environment - run: ./stp/tests/setup_test_env.sh - - name: Run tests - run: cd stp/tests && ./run_tests.sh -``` - -2. Configure notifications for test failures -3. Add documentation generation steps if needed - -### IDE Integration - -#### VS Code Integration - -1. Install the "Bash Debug" extension for debugging STP scripts -2. Configure `.vscode/tasks.json` for common STP tasks: - -```json -{ - "version": "2.0.0", - "tasks": [ - { - "label": "Run STP Tests", - "type": "shell", - "command": "cd ${workspaceFolder}/stp/tests && ./run_tests.sh", - "group": { - "kind": "test", - "isDefault": true - } - } - ] -} -``` - -#### JetBrains IDE Integration - -1. Configure run configurations for STP commands -2. Set up file watchers for markdown linting -3. Add shell script run configurations for tests - -### LLM Platform Integration - -#### Claude Code Integration - -To integrate STP with Claude Code: - -1. Share the `stp/llm/llm_preamble.md` at the beginning of each session -2. Keep relevant steel thread documents in the context window -3. Use structured templates for consistent information sharing - -Example Claude Code command: - -```bash -claude code --context stp/llm/llm_preamble.md --context stp/prj/st/ST0001.md -``` - -#### Other LLM Integration - -For other LLM platforms: - -1. Create platform-specific scripts to extract and format STP context -2. Maintain a consistent formatting pattern when sharing information -3. 
Consider implementing automatic context extraction helpers - -## Maintenance - -### Regular Maintenance Tasks - -- Update STP installation periodically -- Review and clean up completed steel threads -- Archive older project documents -- Sync steel thread status with Backlog tasks -- Archive completed tasks in Backlog - -### Backup Practices - -- Include STP documents in regular backups -- Ensure documentation is committed to version control -- Back up Backlog task data (backlog/tasks/, backlog/archive/) -- Export task data periodically: - ```bash - # Export all tasks to JSON - backlog task list --export > backlog-export-$(date +%Y%m%d).json - ``` - -## Upgrading - -### Upgrading STP Installation - -To upgrade a global STP installation: - -```bash -cd $STP_HOME -git pull -``` - -To upgrade a project-specific installation: - -```bash -cd my-project/.stp -git pull -``` - -### Migrating Between Versions - -When upgrading STP with Backlog integration: - -1. **Backup existing data**: - ```bash - # Backup steel threads - cp -r stp/prj/st stp/prj/st.backup - - # Backup Backlog data - cp -r backlog backlog.backup - ``` - -2. **Run upgrade command**: - ```bash - stp upgrade - ``` - -3. **Migrate embedded tasks** (if upgrading from pre-Backlog version): - ```bash - # Migrate all active steel threads - stp migrate --all-active - ``` - -4. 
**Verify integration**: - ```bash - # Check task status - stp status report - - # Verify tasks in Backlog - stp bl list - ``` - -## Test Suite Deployment - -The STP test suite uses Bats (Bash Automated Testing System) and requires proper setup: - -### Test Dependencies - -The test suite requires the following dependencies: - -- Bats: Core testing framework -- bats-support: Support library for better test output -- bats-assert: Assertion library for test validation -- bats-file: File-related assertions - -### Setting Up the Test Environment - -Run the setup script to install all dependencies: - -```bash -cd stp/tests/ -./setup_test_env.sh -``` - -This script will: - -1. Check for existing Bats installation -2. Install Bats if needed -3. Install required Bats libraries -4. Configure the test environment - -### Test Suite Configuration - -The test suite can be configured through environment variables: - -| Variable | Purpose | Default | -|-----------------|-------------------------------------|-------------------------------| -| BATS_LIB_PATH | Location of Bats libraries | stp/tests/lib | -| STP_TEST_TEMP | Temporary directory for test files | /tmp/stp-test-XXXXXX | -| STP_BIN_PATH | Path to STP executables | Determined from current path | - -### Running Tests in Different Environments - -```bash -# Set custom paths for testing -export STP_BIN_PATH=/custom/path/to/stp/bin -export BATS_LIB_PATH=/custom/path/to/bats/libs - -# Run tests with custom configuration -cd stp/tests/ -./run_tests.sh -``` - -## Troubleshooting - -### Common Issues - -#### Backlog Git Fetch Errors - -If you encounter git fetch errors with Backlog: - -```bash -# Use the STP wrapper instead of direct backlog commands -stp bl list # Instead of: backlog task list - -# Verify remote operations are disabled -backlog config get remoteOperations -# Should return: false - -# If not disabled, fix it: -backlog config set remoteOperations false -``` - -#### Missing Test Dependencies - -If test dependencies 
are missing: - -```bash -# Re-run the setup script -cd stp/tests/ -./setup_test_env.sh -``` - -#### Test Failures - -For test failures: - -1. Check the test output for specific errors -2. Verify the STP installation is correct -3. Ensure all paths are correctly configured -4. Check for permission issues on script files - -#### Permission Errors - -If you encounter permission errors: - -```bash -# Make scripts executable -chmod +x stp/bin/* -chmod +x stp/tests/*.sh -chmod +x stp/tests/lib/*/src/*.bash -``` - -#### Task Synchronization Issues - -If tasks aren't syncing properly with steel threads: - -```bash -# Check task naming convention (should be "ST#### - Description") -stp bl list | grep "ST[0-9]" - -# Manually sync a specific steel thread -stp status sync ST0001 - -# Force sync all active threads -for st in $(stp st list --status "In Progress" | awk '{print $1}' | grep "^ST"); do - stp status sync "$st" -done -``` - -### Diagnostic Tools - -STP provides several diagnostic tools: - -- `stp help`: Verify command availability -- `run_tests.sh`: Run tests to verify functionality -- Test failure output: Contains detailed error information - -To debug test failures, examine the test output and check the corresponding script functionality. - -### Getting Help - -If you encounter issues: - -1. Check the troubleshooting section in this guide -2. Review the test output for specific errors -3. Consult the STP documentation -4. Submit issues to the STP project repository -5. Refer to the Bats documentation for test-specific problems diff --git a/stp/usr/user_guide.md b/stp/usr/user_guide.md deleted file mode 100644 index fe9bbe9..0000000 --- a/stp/usr/user_guide.md +++ /dev/null @@ -1,476 +0,0 @@ ---- -verblock: "09 Jul 2025:v0.3: Matthew Sinclair - Updated llm command with --symlink option" -stp_version: 1.2.0 ---- -# User Guide - -This user guide provides task-oriented instructions for using the Steel Thread Process (STP) system. 
It explains how to accomplish common tasks and provides workflow guidance. - -## Table of Contents - -1. [Introduction](#introduction) -2. [Installation](#installation) -3. [Getting Started](#getting-started) -4. [Working with Steel Threads](#working-with-steel-threads) -5. [Working with Backlog](#working-with-backlog) -6. [Documentation Management](#documentation-management) -7. [LLM Collaboration](#llm-collaboration) -8. [Troubleshooting](#troubleshooting) - -## Introduction - -Steel Thread Process (STP) is a system designed to create a structured workflow and documentation process for developers working collaboratively with Large Language Models (LLMs). STP provides templates, scripts, and process guidelines to enhance productivity while ensuring high-quality documentation as a byproduct of the development process. - -### Purpose - -STP helps developers: - -- Organize and track development work -- Create and maintain project documentation -- Collaborate effectively with LLMs -- Preserve context across development sessions - -### Core Concepts - -- **Steel Thread**: A self-contained unit of work focusing on a specific piece of functionality -- **Documentation Structure**: Organized markdown files capturing project information -- **LLM Collaboration**: Patterns for effective work with language models - -## Installation - -### Prerequisites - -- POSIX-compatible shell (bash, zsh) -- Git (optional, for version control) -- Text editor with markdown support -- Backlog.md (for task management integration) - -### Installation Steps - -1. **Global Installation**: - - ```bash - # Clone the STP repository - git clone https://github.com/username/stp.git ~/stp - - # Add STP bin directory to PATH - echo 'export STP_HOME=~/stp' >> ~/.bashrc - echo 'export PATH=$PATH:$STP_HOME/bin' >> ~/.bashrc - - # Reload shell configuration - source ~/.bashrc - ``` - -2. 
**Project-Specific Installation**: - - ```bash - # From your project directory - git clone https://github.com/username/stp.git .stp - - # Create a local alias for the project - alias stp='./.stp/bin/stp' - ``` - -## Getting Started - -### Initializing a Project - -To set up STP in a new or existing project: - -```bash -# Navigate to project directory -cd my-project - -# Initialize STP with default directories (eng, llm, prj, usr) -stp init "Project Name" - -# Or specify which directories to include -stp init --dirs "eng,llm,prj,usr" "Project Name" - -# Or include all directories (including bin, _templ, tests) -stp init --all "Project Name" - -# Initialize Backlog for task management -stp bl init -``` - -This creates the STP directory structure with template documents and sets up Backlog for task management. - -### Directory Structure - -After initialization with the default directories, you'll have this structure: - -``` -my-project/ -├── stp/ # Project documentation -│ ├── prj/ # Project documentation -│ │ ├── st/ # Steel threads -│ │ └── wip.md # Work in progress -│ ├── eng/ # Engineering docs -│ │ └── tpd/ # Technical Product Design -│ ├── usr/ # User documentation -│ └── llm/ # LLM-specific content -└── backlog/ # Backlog.md task management - ├── tasks/ # Active tasks - ├── drafts/ # Draft tasks - └── config.yml # Backlog configuration -``` - -If you use the `--all` option or include specific directories with `--dirs`, additional directories may be included: - -``` -my-project/ -└── stp/ - ├── bin/ # STP scripts (only with --all or --dirs "bin") - ├── _templ/ # Templates (only with --all or --dirs "_templ") - └── tests/ # Tests (only with --all or --dirs "tests") -``` - -Note: Even when not copying bin files to the new project, STP commands will still work because they execute from the centrally installed location. 
- -## Working with Steel Threads - -### Creating a Steel Thread - -To create a new steel thread: - -```bash -stp st new "Implement Feature X" -``` - -This creates a new steel thread document (e.g., `stp/prj/st/ST0001.md`) and adds it to the index. - -### Viewing Steel Threads - -To list all steel threads: - -```bash -# Basic list of all steel threads -stp st list - -# Filter by status -stp st list --status "In Progress" - -# Adjust table width (useful for wide terminals) -stp st list --width 120 -``` - -To view a specific steel thread: - -```bash -stp st show ST0001 -``` - -To edit a steel thread in your default editor: - -```bash -stp st edit ST0001 -``` - -### Synchronizing Steel Threads - -To update the steel threads index file with information from individual ST files: - -```bash -# Preview changes without writing to file -stp st sync - -# Write changes to steel_threads.md -stp st sync --write - -# Adjust output width -stp st sync --write --width 120 -``` - -### Completing a Steel Thread - -When all tasks in a steel thread are done: - -```bash -stp st done ST0001 -``` - -This updates the status and completion date. - -## Working with Backlog - -STP integrates with Backlog.md for fine-grained task management. The `stp bl` wrapper provides a streamlined interface that avoids common issues like git fetch errors. - -### Initializing Backlog - -To set up Backlog in your project: - -```bash -# Initialize Backlog with STP-friendly settings -stp bl init -``` - -This configures Backlog for local use, disabling remote operations that can cause errors. 
- -### Creating Tasks - -Tasks are linked to steel threads for traceability: - -```bash -# Create a task linked to a steel thread -stp bl create ST0001 "Implement user authentication" - -# Or use the task command -stp task create ST0001 "Add password validation" -``` - -### Listing Tasks - -View all tasks or filter by steel thread: - -```bash -# List all tasks (without git errors) -stp bl list - -# List tasks for a specific steel thread -stp task list ST0001 - -# View tasks in Kanban board -stp bl board -``` - -### Managing Task Status - -Update task status as work progresses: - -```bash -# Edit a task -stp bl task edit task-5 --status "In Progress" - -# Mark a task as done -stp bl task edit task-5 --status Done -``` - -### Synchronizing Status - -Keep steel thread status in sync with task completion: - -```bash -# View status summary -stp status show ST0001 - -# Sync steel thread status based on tasks -stp status sync ST0001 - -# Generate status report for all active threads -stp status report -``` - -### Migrating Existing Tasks - -If you have embedded tasks in steel threads, migrate them to Backlog: - -```bash -# Migrate tasks from a specific steel thread -stp migrate ST0001 - -# Preview migration without making changes -stp migrate --dry-run ST0001 - -# Migrate all active steel threads -stp migrate --all-active -``` - -### Best Practices - -1. **Use the wrapper**: Always use `stp bl` instead of `backlog` directly to avoid git errors -2. **Task naming**: Tasks are automatically named with the pattern "ST#### - Description" -3. **Regular syncing**: Run `stp status sync` to keep steel thread status current -4. 
**Task granularity**: Create tasks that can be completed in 1-2 days - -## Documentation Management - -STP provides a structured approach to managing project documentation: - -### Updating Technical Product Design - -The technical product design document is the central reference for the project: - -```bash -# Open the TPD document -stp tpd -``` - -When making significant changes to the project, update the TPD to keep it in sync with the implementation. - -### Working with User Documentation - -User documentation is maintained in the `stp/usr/` directory: - -- `user_guide.md`: Task-oriented instructions for users -- `reference_guide.md`: Comprehensive reference information -- `deployment_guide.md`: Installation and deployment guidance - -Update these documents as features are added or changed. - -## LLM Collaboration - -STP is designed for effective collaboration with Large Language Models like Claude: - -### Using the LLM Preamble - -The LLM preamble file contains context that should be shared with LLMs at the beginning of each session: - -```bash -# View the LLM preamble -cat stp/llm/llm_preamble.md -``` - -Include this preamble when starting new sessions with an LLM to provide essential context. 
- -### Understanding STP Usage Patterns - -STP provides usage rules documentation specifically designed for LLMs: - -```bash -# Display usage patterns and workflows for LLMs -stp llm usage_rules - -# Create symlink for Elixir projects (or other tools expecting usage-rules.md) -stp llm usage_rules --symlink - -# Save to a file for reference -stp llm usage_rules > usage-rules.md -``` - -This document helps LLMs understand: -- How to use STP commands effectively -- Common workflows and best practices -- Steel thread management patterns -- Task integration with Backlog.md - -### Contextualizing Work with Steel Threads - -When working with an LLM on a specific steel thread: - -```bash -# Share the steel thread document with the LLM -stp st show ST0001 | [send to LLM] -``` - -This provides the LLM with task-specific context for more effective collaboration. - -## Testing - -STP includes a comprehensive test suite to verify functionality: - -### Running Tests - -To run the test suite: - -```bash -# Run all tests -cd stp/tests/ -./run_tests.sh - -# Run specific test suite -./run_tests.sh bootstrap -``` - -### Test Structure - -Tests are organized by component: -- `bootstrap_test.bats`: Tests for bootstrap script -- `init_test.bats`: Tests for init command -- `st_test.bats`: Tests for steel thread commands -- `help_test.bats`: Tests for help system -- `main_test.bats`: Tests for main script - -## Upgrading STP - -When new versions of STP are released, you may need to upgrade your existing STP projects to ensure compatibility with the latest features. - -### Running the Upgrade Command - -To upgrade all STP files in your project to the latest format: - -```bash -stp upgrade -``` - -This command: -- Updates metadata in all STP files -- Adds or updates YAML frontmatter -- Ensures files follow the current format standards -- Adds section markers for automatic sync - -### Forcing Upgrades - -For major version differences, the upgrade command will warn you before proceeding. 
To force the upgrade: - -```bash -stp upgrade --force -``` - -### After Upgrading - -After upgrading, it's a good practice to: - -1. Review updated files to ensure everything looks correct -2. Run a sync to update the steel threads index: - ```bash - stp st sync --write - ``` -3. Commit the changes if you're using version control - -## Troubleshooting - -### Common Issues - -#### STP Commands Not Found - -If STP commands are not found: - -```bash -# Check STP_HOME environment variable -echo $STP_HOME - -# Ensure STP bin directory is in PATH -echo $PATH | grep stp - -# Fix PATH if needed -export PATH=$PATH:$STP_HOME/bin -``` - -#### Permission Issues - -If you encounter permission errors: - -```bash -# Make scripts executable -chmod +x $STP_HOME/bin/* -``` - -#### Template Generation Errors - -If template generation fails, check file permissions and ensure template files exist in the `_templ` directory. - -#### Backlog Git Fetch Errors - -If you see git fetch errors when using Backlog: - -```bash -# Use the STP wrapper instead -stp bl list # Instead of: backlog task list - -# Ensure remote operations are disabled -backlog config get remoteOperations -# Should return: false -``` - -#### Task Not Found - -If tasks aren't showing up: - -```bash -# Check task files exist -ls backlog/tasks/ - -# Use --plain flag if needed -backlog task list --plain -``` diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..cfcd78f --- /dev/null +++ b/tests/README.md @@ -0,0 +1,152 @@ +# Intent Test Suite + +## Overview + +This directory contains the test suite for Intent v2.2.0. The tests are written using [Bats](https://github.com/bats-core/bats-core) (Bash Automated Testing System). 
+ +## Directory Structure + +``` +tests/ +├── unit/ # Unit tests for individual commands +│ ├── basic.bats # Basic infrastructure tests +│ ├── config.bats # Configuration and PROJECT_ROOT tests +│ ├── global_commands.bats # Tests for global commands +│ ├── migration.bats # Migration and backup tests +│ └── project_commands.bats # Tests for project-specific commands +├── integration/ # Integration tests +│ └── end_to_end.bats # Full workflow tests +├── fixtures/ # Test fixtures (sample files, etc.) +├── lib/ # Test libraries +│ └── test_helper.bash # Common test functions +├── run_tests.sh # Main test runner +└── README.md # This file +``` + +## Prerequisites + +Install Bats: + +```bash +# macOS with Homebrew +brew install bats-core + +# Or from source +git clone https://github.com/bats-core/bats-core.git +cd bats-core +./install.sh /usr/local +``` + +## Running Tests + +### Run all tests: +```bash +./tests/run_tests.sh +``` + +### Run specific test file: +```bash +./tests/run_tests.sh tests/unit/global_commands.bats +``` + +### Run all unit tests: +```bash +./tests/run_tests.sh tests/unit/ +``` + +### Run all integration tests: +```bash +./tests/run_tests.sh tests/integration/ +``` + +## Writing Tests + +### Basic Test Structure + +```bash +#!/usr/bin/env bats + +load "../lib/test_helper.bash" + +@test "description of what you're testing" { + # Setup + project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + # Run command + run run_intent <command> <args> + + # Assert results + assert_success # or assert_failure + assert_output_contains "expected text" +} +``` + +### Available Helper Functions + +- `create_test_project "name"` - Creates a test Intent project +- `run_intent <args>` - Runs the intent command +- `assert_success` - Asserts command succeeded (exit 0) +- `assert_failure` - Asserts command failed (exit non-zero) +- `assert_output_contains "text"` - Checks if output contains text +- `assert_file_exists "path"` - Checks if file exists +- 
`assert_directory_exists "path"` - Checks if directory exists +- `assert_file_contains "file" "text"` - Checks if file contains text + +## Test Categories + +### Unit Tests + +Unit tests focus on individual commands and features: + +- **global_commands.bats** - Tests commands that work without a project (help, doctor, info, etc.) +- **project_commands.bats** - Tests commands that require a project context (st, bl, task, etc.) +- **config.bats** - Tests configuration loading and PROJECT_ROOT detection +- **migration.bats** - Tests backup creation and version migration + +### Integration Tests + +Integration tests verify complete workflows: + +- **end_to_end.bats** - Tests full user workflows like creating a project and managing steel threads + +## Key Test Scenarios + +1. **Global vs Project Commands** + - Global commands work anywhere + - Project commands show helpful error outside projects + +2. **Configuration** + - PROJECT_ROOT detected from subdirectories + - Config files loaded correctly + - Legacy projects detected + +3. **Error Handling** + - No silent failures + - Clear error messages + - Helpful suggestions + +4. **Migration** + - Backup directories use `.backup_*` prefix + - Version fields use `intent_version` + - Legacy projects can be upgraded + +## Debugging Tests + +To see more output when debugging: +```bash +# Run with verbose output +bats -v tests/unit/config.bats + +# Run with tap output +bats -t tests/unit/config.bats +``` + +## CI/CD + +Tests should be run on: +- Every push to main +- Every pull request +- Multiple OS versions (macOS, Linux) + +See `.github/workflows/test.yml` for CI configuration (TODO). 
\ No newline at end of file diff --git a/tests/core_functionality.bats b/tests/core_functionality.bats new file mode 100644 index 0000000..7b6b205 --- /dev/null +++ b/tests/core_functionality.bats @@ -0,0 +1,76 @@ +#!/usr/bin/env bats +# Core functionality tests - the essential tests that must pass + +load "lib/test_helper.bash" + +@test "intent shows info when run with no args" { + run run_intent + assert_success + assert_output_contains "Intent: The Steel Thread Process" + assert_output_contains "Installation:" +} + +@test "intent help works globally" { + run run_intent help + assert_success + assert_output_contains "Usage: intent" +} + +@test "intent st list requires project and shows clear error" { + # Outside project + run run_intent st list + assert_failure + assert_output_contains "Not in an Intent project directory" + assert_output_contains "The 'st' command requires an Intent project" +} + +@test "intent bl command is fixed and callable" { + # Create project for bl test + project_dir=$(create_test_project "BL Test") + cd "$project_dir" + touch backlog/Backlog.md + + # Should show help since bl needs subcommand + run run_intent bl + assert_success + assert_output_contains "backlog" +} + +@test "no more silent failures - all commands give feedback" { + # Test a project command outside project + run run_intent st new "Test" + assert_failure + # Should see error, not silence + [ -n "$output" ] || fail "Expected output but got none" + assert_output_contains "Not in an Intent project" +} + +@test "backup uses new .backup_ prefix not .stp_backup_" { + project_dir=$(create_test_project "Backup Test") + cd "$project_dir" + + # Source helpers + source "${INTENT_BIN_DIR}/intent_helpers" + + # Create backup + create_project_backup "$project_dir" + + # Check new prefix is used + backup_dirs=(.backup_*) + [ -d "${backup_dirs[0]}" ] || fail "No .backup_* directory found" + + # Old prefix should not exist + if ls .stp_backup_* 2>/dev/null; then + fail "Found old 
.stp_backup_* directory" + fi +} + +@test "all bin scripts use intent not stp references" { + # Check intent_bl calls intent_backlog not stp_backlog + assert_file_contains "${INTENT_BIN_DIR}/intent_bl" "intent_backlog" + + # Should not contain old stp_backlog reference + if grep -q "stp_backlog" "${INTENT_BIN_DIR}/intent_bl"; then + fail "intent_bl still contains stp_backlog reference" + fi +} \ No newline at end of file diff --git a/tests/integration/end_to_end.bats b/tests/integration/end_to_end.bats new file mode 100644 index 0000000..7cb4910 --- /dev/null +++ b/tests/integration/end_to_end.bats @@ -0,0 +1,98 @@ +#!/usr/bin/env bats +# End-to-end integration tests + +load "../lib/test_helper.bash" + +@test "complete workflow: create project, add steel thread, list it" { + # Start in temp directory + original_dir=$(pwd) + + # Create new project + mkdir test_project + cd test_project + + # Initialize project + run run_intent init "Integration Test Project" + # Skip for now as init needs interactive input + + # Create project manually for testing + cd "$original_dir" + project_dir=$(create_test_project "Integration Test") + cd "$project_dir" + + # Verify project structure + assert_directory_exists "intent/st" + assert_file_exists ".intent/config.json" + + # Check info shows project details + run run_intent info + assert_success + assert_output_contains "Project:" + assert_output_contains "Name: Integration Test" + + # Create a steel thread manually (st new needs editor) + mkdir -p "intent/st/ST0001" + cat > "intent/st/ST0001/info.md" << EOF +--- +id: ST0001 +title: Test Feature Implementation +status: In Progress +created: $(date +%Y-%m-%d) +author: test_user +intent_version: 2.0.0 +--- + +# ST0001: Test Feature Implementation + +## Metadata +- **Status**: In Progress +- **Created**: $(date +%Y-%m-%d) +- **Author**: test_user + +## Description +This is a test steel thread for integration testing. 
+EOF + + # List steel threads + run run_intent st list + assert_success + assert_output_contains "ST0001" + assert_output_contains "Test Feature Implement" # Title is truncated to ~25 chars in list + assert_output_contains "In Progress" + + # Check st show works + run run_intent st show ST0001 + assert_success + assert_output_contains "Test Feature" # Title might be truncated in list view +} + +@test "error handling: project commands fail gracefully outside project" { + # Commands that require project + local project_commands=("st" "bl" "task" "migrate" "status") + + for cmd in "${project_commands[@]}"; do + run run_intent "$cmd" + assert_failure + assert_output_contains "Not in an Intent project directory" + assert_output_contains "The '$cmd' command requires an Intent project" + done +} + +@test "global commands work outside project" { + # Commands that work anywhere + run run_intent + assert_success + assert_output_contains "Intent: The Steel Thread Process" + + run run_intent help + assert_success + assert_output_contains "Usage: intent" + + run run_intent doctor + assert_success + assert_output_contains "Intent Doctor" + + run run_intent info + assert_success + assert_output_contains "Not in an Intent project directory" +} \ No newline at end of file diff --git a/tests/lib/test_helper.bash b/tests/lib/test_helper.bash new file mode 100644 index 0000000..2f7b233 --- /dev/null +++ b/tests/lib/test_helper.bash @@ -0,0 +1,183 @@ +#!/usr/bin/env bash +# Test helper functions and setup for Intent tests + +# Set up project-specific paths +# Use absolute paths to ensure tests work from any directory +INTENT_PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +INTENT_BIN_DIR="${INTENT_PROJECT_ROOT}/bin" +INTENT_TEST_FIXTURES="${INTENT_PROJECT_ROOT}/tests/fixtures" +INTENT_TEMP_DIR="${INTENT_PROJECT_ROOT}/tests/tmp" + +# Export INTENT_HOME for tests +export INTENT_HOME="${INTENT_PROJECT_ROOT}" + +# Create temporary test directory +setup_file() { + mkdir -p "${INTENT_TEMP_DIR}" +} + +# Clean up test directory after all tests in file +teardown_file() { + if [ -d "${INTENT_TEMP_DIR}" ]; then + rm -rf "${INTENT_TEMP_DIR}" + fi +} + +# Create a temporary test directory for each test +setup() { + # Create temp dir outside of Intent project to test "outside project" scenarios + TEST_TEMP_DIR="$(mktemp -d /tmp/intent-test-XXXXXX)" + cd "${TEST_TEMP_DIR}" || exit 1 +} + +# Clean up temporary test directory after each test +teardown() { + if [ -d "${TEST_TEMP_DIR}" ]; then + cd "${INTENT_PROJECT_ROOT}" || exit 1 + rm -rf "${TEST_TEMP_DIR}" + fi +} + +# Helper function to create a test Intent project +create_test_project() { + local project_name="${1:-Test Project}" + local dir="${2:-$TEST_TEMP_DIR/test-project}" + + mkdir -p "$dir/.intent" + cat > "$dir/.intent/config.json" << EOF +{ + "intent_version": "2.0.0", + "project_name": "$project_name", + "author": "test_user", + "created_date": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" +} +EOF + + # Create standard directories + mkdir -p "$dir/intent/st/COMPLETED" + mkdir -p "$dir/intent/st/NOT-STARTED" + mkdir -p "$dir/intent/st/CANCELLED" + mkdir -p "$dir/intent/eng/tpd" + mkdir -p "$dir/intent/ref" + mkdir -p "$dir/intent/llm" + mkdir -p "$dir/backlog" + + echo "$dir" +} + +# Helper function to run intent command +run_intent() { + "${INTENT_BIN_DIR}/intent" "$@" +} + +# Helper to check if command output contains expected text +assert_output_contains() { + local expected="$1" + if [[ "$output" != *"$expected"* ]]; then + echo "Expected output to contain: $expected" + echo "Actual output: $output" + return 1 + fi +} + +# Helper to check if command succeeded +assert_success() { + if [ 
"$status" -ne 0 ]; then + echo "Expected command to succeed, but it failed with status $status" + echo "Output: $output" + return 1 + fi +} + +# Helper to check if command failed +assert_failure() { + if [ "$status" -eq 0 ]; then + echo "Expected command to fail, but it succeeded" + echo "Output: $output" + return 1 + fi +} + +# Helper for test failures +fail() { + echo "$1" + return 1 +} + +# Helper to check if file exists +assert_file_exists() { + local file="$1" + if [ ! -f "$file" ]; then + echo "Expected file to exist: $file" + return 1 + fi +} + +# Helper to check if directory exists +assert_directory_exists() { + local dir="$1" + if [ ! -d "$dir" ]; then + echo "Expected directory to exist: $dir" + return 1 + fi +} + +# Helper to check if file contains text +assert_file_contains() { + local file="$1" + local text="$2" + if ! grep -qF "$text" "$file"; then + echo "Expected file $file to contain: $text" + echo "File contents:" + cat "$file" + return 1 + fi +} + +# Helper to check if file does not exist +assert_file_not_exists() { + local file="$1" + if [ -f "$file" ]; then + echo "Expected file to not exist: $file" + return 1 + fi +} + +# Helper to check if output does not contain text +refute_output_contains() { + local text="$1" + if [[ "$output" == *"$text"* ]]; then + echo "Expected output to NOT contain: $text" + echo "Actual output: $output" + return 1 + fi +} + +# Helper to check exact output match +assert_output() { + local expected="$1" + if [[ "$output" != "$expected" ]]; then + echo "Expected output: $expected" + echo "Actual output: $output" + return 1 + fi +} + +# Helper to get Intent version from VERSION file +get_intent_version() { + # First try the VERSION file in the Intent installation + if [ -f "${INTENT_HOME}/VERSION" ]; then + cat "${INTENT_HOME}/VERSION" + elif [ -f "${INTENT_PROJECT_ROOT}/VERSION" ]; then + cat "${INTENT_PROJECT_ROOT}/VERSION" + elif [ -f "${INTENT_PROJECT_ROOT}/.intent/config.json" ]; then + # Fallback to config.json 
for compatibility + jq -r '.version // .intent_version // "2.2.1"' "${INTENT_PROJECT_ROOT}/.intent/config.json" + else + echo "2.2.1" + fi +} + +# Load bats libraries if available +# Note: bats libraries can be installed globally or added to tests/lib/ +# For now, we rely on the basic assert functions defined above \ No newline at end of file diff --git a/tests/run_tests.sh b/tests/run_tests.sh new file mode 100755 index 0000000..19013a3 --- /dev/null +++ b/tests/run_tests.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# run_tests.sh - Run the Intent test suite +# Usage: ./run_tests.sh [test_path] + +# Set up colors for output +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Function to display error messages +error() { + echo -e "${RED}Error: $1${NC}" >&2 + exit 1 +} + +# Function to display success messages +success() { + echo -e "${GREEN}$1${NC}" +} + +# Function to display warning messages +warning() { + echo -e "${YELLOW}Warning: $1${NC}" +} + +# Function to display information messages +info() { + echo -e "${BLUE}$1${NC}" +} + +# Check if bats is installed +if ! command -v bats &> /dev/null; then + error "Bats is not installed. Please install it first: + +On macOS with Homebrew: + brew install bats-core + +Or install from source: + git clone https://github.com/bats-core/bats-core.git + cd bats-core + ./install.sh /usr/local" +fi + +# Get the directory where this script is located +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Export INTENT_HOME for tests +export INTENT_HOME="$PROJECT_ROOT" + +# Export BATS_LIB_PATH for bats libraries +export BATS_LIB_PATH="$SCRIPT_DIR/lib" + +# Set default test path - only run new Intent tests by default +# To run old STP tests: ./tests/run_tests.sh ../stp/tests +TEST_PATH="${1:-$SCRIPT_DIR}" + +# Check if test path exists +if [ ! 
-e "$TEST_PATH" ]; then + error "Test path does not exist: $TEST_PATH" +fi + +# Display test information +echo +info "Intent Test Suite" +info "================" +info "INTENT_HOME: $INTENT_HOME" +info "Test path: $TEST_PATH" +echo + +# Run the tests +if [ -d "$TEST_PATH" ]; then + # If directory, run all .bats files in it (excluding lib directory) + info "Running all tests in directory: $TEST_PATH" + find "$TEST_PATH" -name "*.bats" -type f -not -path "*/lib/*" | sort | xargs bats +else + # If file, run just that file + info "Running test file: $TEST_PATH" + bats "$TEST_PATH" +fi + +# Check exit status +EXIT_STATUS=$? + +echo +if [ $EXIT_STATUS -eq 0 ]; then + success "All tests passed!" +else + error "Some tests failed!" +fi + +exit $EXIT_STATUS \ No newline at end of file diff --git a/tests/unit/agent_commands.bats b/tests/unit/agent_commands.bats new file mode 100755 index 0000000..020e105 --- /dev/null +++ b/tests/unit/agent_commands.bats @@ -0,0 +1,747 @@ +#!/usr/bin/env bats +# Tests for intent claude subagents commands (v2.3.0) + +load "../lib/test_helper.bash" + +# Setup/teardown for agent tests +setup() { + # Create temp dir outside of Intent project + TEST_TEMP_DIR="$(mktemp -d /tmp/intent-test-XXXXXX)" + cd "${TEST_TEMP_DIR}" || exit 1 + + # Create a mock .claude directory for testing + mkdir -p "$HOME/.claude/agents" + + # Save any existing agents + if [ -d "$HOME/.claude/agents.backup" ]; then + rm -rf "$HOME/.claude/agents.backup" + fi + if [ -d "$HOME/.claude/agents" ] && [ "$(ls -A $HOME/.claude/agents 2>/dev/null)" ]; then + cp -r "$HOME/.claude/agents" "$HOME/.claude/agents.backup" + fi + + # Clean the agents directory for testing + rm -f "$HOME/.claude/agents"/*.md 2>/dev/null || true +} + +teardown() { + # Restore backed up agents if they exist + if [ -d "$HOME/.claude/agents.backup" ]; then + rm -rf "$HOME/.claude/agents" + mv "$HOME/.claude/agents.backup" "$HOME/.claude/agents" + fi + + # Clean up test directory + if [ -d "${TEST_TEMP_DIR}" 
]; then + cd "${INTENT_PROJECT_ROOT}" || exit 1 + rm -rf "${TEST_TEMP_DIR}" + fi + + # Clean up test manifests + rm -rf "$HOME/.intent/agents" 2>/dev/null || true +} + +@test "claude subagents command shows help when no subcommand given" { + run run_intent claude subagents + assert_success + assert_output_contains "Usage: intent claude subagents <command>" + assert_output_contains "list" + assert_output_contains "install" +} + +@test "claude subagents list shows available agents" { + run run_intent claude subagents list + assert_success + assert_output_contains "Available Agents:" + assert_output_contains "Global:" + assert_output_contains "intent" + assert_output_contains "elixir" +} + +@test "claude subagents list shows installation status" { + # Initially nothing installed + run run_intent claude subagents list + assert_success + assert_output_contains "[NOT INSTALLED]" + + # Install an agent manually + cp "${INTENT_HOME}/intent/plugins/claude/subagents/intent/agent.md" "$HOME/.claude/agents/intent.md" + + # Check it shows as installed + run run_intent claude subagents list + assert_success + assert_output_contains "intent - Intent-aware assistant for steel threads and backlog management [INSTALLED]" + assert_output_contains "elixir - Elixir code doctor with Usage Rules and Ash/Phoenix patterns [NOT INSTALLED]" +} + +@test "claude subagents install requires an agent name" { + run run_intent claude subagents install + assert_failure + assert_output_contains "Error: No agent specified" + assert_output_contains "Usage: intent claude subagents install" +} + +@test "claude subagents install installs a single agent" { + run run_intent claude subagents install intent --force + assert_success + assert_output_contains "Installing agent: intent" + assert_output_contains "Installed successfully" + assert_output_contains "Installation complete:" + assert_output_contains "Installed:" + + # Verify the file was created + assert_file_exists "$HOME/.claude/agents/intent.md" + + 
# Verify it shows as installed + run run_intent claude subagents list + assert_success + assert_output_contains "intent - Intent-aware assistant for steel threads and backlog management [INSTALLED]" +} + +@test "claude subagents install handles non-existent agent" { + run run_intent claude subagents install nonexistent + assert_failure # Command fails when no agents installed + assert_output_contains "Error: Agent 'nonexistent' not found" + assert_output_contains "Failed: 1" +} + +@test "claude subagents install prompts before overwriting" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Try to install again, saying no to overwrite + run bash -c "echo 'n' | ${INTENT_BIN_DIR}/intent claude subagents install intent" + assert_success + assert_output_contains "Agent already exists" + assert_output_contains "Skipped" + assert_output_contains "Skipped: 1" +} + +@test "claude subagents install can overwrite when confirmed" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Modify the agent to test overwrite + echo "# Modified" >> "$HOME/.claude/agents/intent.md" + + # Try to install again, saying yes to overwrite + run bash -c "echo 'y' | ${INTENT_BIN_DIR}/intent claude subagents install intent" + assert_success + assert_output_contains "Agent already exists" + assert_output_contains "Installed successfully" + assert_output_contains "Installation complete:" + assert_output_contains "Installed:" + + # Verify modification was overwritten + run grep "# Modified" "$HOME/.claude/agents/intent.md" + assert_failure +} + +@test "claude subagents install supports multiple agents" { + run run_intent claude subagents install intent elixir --force + assert_success + assert_output_contains "Installing agent: intent" + assert_output_contains "Installing agent: elixir" + # Test that at least 2 agents were installed (not exact count) + assert_output_contains "Installation complete:" 
+ assert_output_contains "Installed:" + + # Verify both files exist + assert_file_exists "$HOME/.claude/agents/intent.md" + assert_file_exists "$HOME/.claude/agents/elixir.md" +} + +@test "claude subagents install --all installs all available agents" { + run run_intent claude subagents install --all --force + assert_success + assert_output_contains "Installing agent: intent" + assert_output_contains "Installing agent: elixir" + assert_output_contains "Installing agent: socrates" + # Test that installation completed (not exact count) + assert_output_contains "Installation complete:" + assert_output_contains "Installed:" + + # Verify all agents are installed + run run_intent claude subagents list + assert_success + assert_output_contains "intent - Intent-aware assistant for steel threads and backlog management [INSTALLED]" + assert_output_contains "elixir - Elixir code doctor with Usage Rules and Ash/Phoenix patterns [INSTALLED]" + assert_output_contains "socrates - CTO Review Mode for technical decision-making via Socratic dialog [INSTALLED]" +} + +@test "claude subagents install creates manifest" { + # Clean any existing manifest + rm -rf "$HOME/.intent/agents" 2>/dev/null || true + + run run_intent claude subagents install intent --force + assert_success + + # Check manifest was created + assert_file_exists "$HOME/.intent/agents/installed-agents.json" + + # Verify manifest content + run cat "$HOME/.intent/agents/installed-agents.json" + assert_success + assert_output_contains '"name": "intent"' + assert_output_contains '"source": "global"' + assert_output_contains '"checksum":' +} + +@test "claude subagents install updates manifest on reinstall" { + # Install once + run run_intent claude subagents install intent --force + assert_success + + # Get original timestamp + original_manifest=$(cat "$HOME/.intent/agents/installed-agents.json") + + # Wait a moment and reinstall + sleep 1 + run bash -c "echo 'y' | ${INTENT_BIN_DIR}/intent claude subagents install intent" + 
assert_success + + # Verify manifest was updated + new_manifest=$(cat "$HOME/.intent/agents/installed-agents.json") + [ "$original_manifest" != "$new_manifest" ] || fail "Manifest should have been updated" + + # Should still only have one entry for intent + count=$(jq '.installed | map(select(.name == "intent")) | length' "$HOME/.intent/agents/installed-agents.json") + [ "$count" -eq 1 ] || fail "Should only have one entry for intent agent" +} + +@test "agents handles missing Claude directory gracefully" { + # Remove .claude directory + rm -rf "$HOME/.claude" + + run run_intent claude subagents list + assert_success + assert_output_contains "Note: Claude Code not detected" + + run run_intent claude subagents install intent + assert_failure + assert_output_contains "Error: Claude Code not detected" +} + +@test "agents creates .claude/agents directory if missing" { + # Ensure .claude exists but not agents subdirectory + mkdir -p "$HOME/.claude" + rm -rf "$HOME/.claude/agents" + + run run_intent claude subagents install intent --force + assert_success + assert_directory_exists "$HOME/.claude/agents" + assert_file_exists "$HOME/.claude/agents/intent.md" +} + +@test "agents command handles invalid subcommand" { + run run_intent claude subagents invalid + assert_failure + assert_output_contains "Error: Unknown command 'intent claude subagents invalid'" + assert_output_contains "Run 'intent claude subagents help' for usage" +} + +@test "agents works from within a project" { + project_dir=$(create_test_project "Agent Test Project") + cd "$project_dir" + + # Should work the same from within a project + run run_intent claude subagents list + assert_success + assert_output_contains "Available Agents:" + + run run_intent claude subagents install intent --force + assert_success + assert_output_contains "Installed successfully" +} + +# Sync command tests +@test "claude subagents sync requires installed agents" { + # Clean manifest + rm -rf "$HOME/.intent/agents" 2>/dev/null || 
true + + run run_intent claude subagents sync + assert_success + assert_output_contains "No installed agents found" + assert_output_contains "Use 'intent claude subagents install'" +} + +@test "claude subagents sync detects up-to-date agents" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Sync should find nothing to update + run run_intent claude subagents sync + assert_success + assert_output_contains "Checking agent: intent" + assert_output_contains "Up to date" + assert_output_contains "Skipped: 1" +} + +@test "claude subagents sync detects local modifications" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Modify the agent + echo "# Test modification" >> "$HOME/.claude/agents/intent.md" + + # Sync should detect modification + run bash -c "echo 'n' | ${INTENT_BIN_DIR}/intent claude subagents sync" + assert_success + assert_output_contains "Warning: Agent has been modified locally" + assert_output_contains "Overwrite local changes?" 
+ assert_output_contains "Skipped" +} + +@test "claude subagents sync can force overwrite modifications" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Modify the agent + echo "# Test modification" >> "$HOME/.claude/agents/intent.md" + + # Force sync should overwrite + run run_intent claude subagents sync --force + assert_success + assert_output_contains "Warning: Agent has been modified locally" + assert_output_contains "Overwriting local changes (--force)" + assert_output_contains "Updated successfully" + + # Verify modification was removed + run grep "# Test modification" "$HOME/.claude/agents/intent.md" + assert_failure +} + +@test "claude subagents sync updates when source changes" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Simulate source update by modifying the source file + # (In real scenario, this would be from a git pull) + echo "# Source update" >> "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + + # Sync should detect and update + run run_intent claude subagents sync + assert_success + assert_output_contains "Update available" + assert_output_contains "Updated successfully" + assert_output_contains "Updated: 1" + + # Verify update was applied + run grep "# Source update" "$HOME/.claude/agents/intent.md" + assert_success + + # Clean up source modification + sed -i.bak '/# Source update/d' "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + rm -f "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md.bak" +} + +@test "claude subagents sync handles missing Claude directory" { + # Remove .claude directory + rm -rf "$HOME/.claude" + + run run_intent claude subagents sync + assert_failure + assert_output_contains "Error: Claude Code not detected" +} + +@test "claude subagents sync works with multiple agents" { + # Install multiple agents + run run_intent claude subagents install intent elixir --force + 
assert_success + + # Sync should check both + run run_intent claude subagents sync + assert_success + assert_output_contains "Checking agent: intent" + assert_output_contains "Checking agent: elixir" + assert_output_contains "Up to date" +} + +# Uninstall command tests +@test "claude subagents uninstall requires an agent name" { + run run_intent claude subagents uninstall + assert_failure + assert_output_contains "Error: No agent specified" + assert_output_contains "Usage: intent claude subagents uninstall" +} + +@test "claude subagents uninstall removes a single agent" { + # Install an agent first + run run_intent claude subagents install intent --force + assert_success + + # Uninstall with force + run run_intent claude subagents uninstall intent --force + assert_success + assert_output_contains "Uninstalling agent: intent" + assert_output_contains "Removed successfully" + assert_output_contains "Removed: 1" + + # Verify it's gone + assert_file_not_exists "$HOME/.claude/agents/intent.md" + + # Verify it shows as not installed + run run_intent claude subagents list + assert_success + assert_output_contains "intent - Intent-aware assistant for steel threads and backlog management [NOT INSTALLED]" +} + +@test "claude subagents uninstall prompts for confirmation" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Try to uninstall, saying no + run bash -c "echo 'n' | ${INTENT_BIN_DIR}/intent claude subagents uninstall intent" + assert_success + assert_output_contains "The following agents will be uninstalled:" + assert_output_contains "- intent" + assert_output_contains "Continue?" 
+ assert_output_contains "Cancelled" + + # Verify agent still exists + assert_file_exists "$HOME/.claude/agents/intent.md" +} + +@test "claude subagents uninstall handles non-existent agent" { + run run_intent claude subagents uninstall nonexistent --force + assert_success + assert_output_contains "Uninstalling agent: nonexistent" + assert_output_contains "Agent not found" + assert_output_contains "Skipped: 1" +} + +@test "claude subagents uninstall supports multiple agents" { + # Install multiple agents + run run_intent claude subagents install intent elixir --force + assert_success + + # Uninstall both + run run_intent claude subagents uninstall intent elixir --force + assert_success + assert_output_contains "Uninstalling agent: intent" + assert_output_contains "Uninstalling agent: elixir" + assert_output_contains "Removed: 2" + + # Verify both are gone + assert_file_not_exists "$HOME/.claude/agents/intent.md" + assert_file_not_exists "$HOME/.claude/agents/elixir.md" +} + +@test "claude subagents uninstall --all removes all agents" { + # Install multiple agents + run run_intent claude subagents install intent elixir --force + assert_success + + # Uninstall all + run run_intent claude subagents uninstall --all --force + assert_success + assert_output_contains "Uninstalling agent: intent" + assert_output_contains "Uninstalling agent: elixir" + assert_output_contains "Removed: 2" + + # Verify all are gone + run run_intent claude subagents list + assert_success + assert_output_contains "[NOT INSTALLED]" +} + +@test "claude subagents uninstall updates manifest" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Verify manifest has the agent + run jq '.installed[].name' "$HOME/.intent/agents/installed-agents.json" + assert_success + assert_output_contains "intent" + + # Uninstall + run run_intent claude subagents uninstall intent --force + assert_success + + # Verify manifest no longer has the agent + run jq 
'.installed[].name' "$HOME/.intent/agents/installed-agents.json" + assert_success + refute_output_contains "intent" +} + +@test "claude subagents uninstall warns about unmanaged agents" { + # Install a managed agent first to ensure manifest exists + run run_intent claude subagents install intent --force + assert_success + + # Manually create an agent not in manifest + mkdir -p "$HOME/.claude/agents" + echo "# Manual agent" > "$HOME/.claude/agents/manual.md" + + # Try to uninstall - need to confirm twice (once for uninstall, once for unmanaged) + run bash -c "printf 'y\nn\n' | ${INTENT_BIN_DIR}/intent claude subagents uninstall manual" + assert_success + assert_output_contains "Warning: Agent not managed by Intent" + assert_output_contains "Remove anyway?" + assert_output_contains "Skipped" + + # Verify it still exists + assert_file_exists "$HOME/.claude/agents/manual.md" + + # Clean up + rm -f "$HOME/.claude/agents/manual.md" +} + +@test "claude subagents uninstall handles missing Claude directory" { + # Remove .claude directory + rm -rf "$HOME/.claude" + + run run_intent claude subagents uninstall intent + assert_failure + assert_output_contains "Error: Claude Code not detected" +} + +@test "claude subagents uninstall handles empty manifest" { + # Clean manifest + rm -rf "$HOME/.intent/agents" 2>/dev/null || true + + run run_intent claude subagents uninstall --all + assert_success + assert_output_contains "No installed agents found" +} + +# Show command tests +@test "claude subagents show requires an agent name" { + run run_intent claude subagents show + assert_failure + assert_output_contains "Error: Agent name required" + assert_output_contains "Usage: intent claude subagents show" +} + +@test "claude subagents show displays agent information" { + run run_intent claude subagents show intent + assert_success + assert_output_contains "Agent: intent" + assert_output_contains "Version: 1.0.0" + assert_output_contains "Description: Intent-aware assistant for steel 
threads and backlog management" + assert_output_contains "Source: global" + assert_output_contains "Tools: Bash, Read, Write, Edit, Grep" + assert_output_contains "Tags: project-management, steel-threads, backlog, task-tracking" +} + +@test "claude subagents show indicates installation status" { + # First check when not installed + rm -f "$HOME/.claude/agents/intent.md" 2>/dev/null || true + run run_intent claude subagents show intent + assert_success + assert_output_contains "Status: NOT INSTALLED" + assert_output_contains "To install: intent agents install intent" + + # Install and check again + run run_intent claude subagents install intent --force + assert_success + + run run_intent claude subagents show intent + assert_success + assert_output_contains "Status: INSTALLED" + assert_output_contains "Full content: $HOME/.claude/agents/intent.md" +} + +@test "claude subagents show displays metadata" { + run run_intent claude subagents show elixir + assert_success + assert_output_contains "Agent: elixir" + assert_output_contains "Description: Elixir code doctor with Usage Rules and Ash/Phoenix patterns" + assert_output_contains "Author: Intent Contributors" + assert_output_contains "Tools:" + assert_output_contains "Tags:" +} + +@test "claude subagents show includes system prompt preview" { + run run_intent claude subagents show intent + assert_success + assert_output_contains "System Prompt Preview:" + assert_output_contains "You are an Intent-aware development assistant specialized in the Intent project management framework" + assert_output_contains "Intent Framework Knowledge" +} + +@test "claude subagents show displays installation info when installed" { + # Install agent + run run_intent claude subagents install elixir --force + assert_success + + run run_intent claude subagents show elixir + assert_success + assert_output_contains "Status: INSTALLED" + assert_output_contains "Installed: 202" # Partial match for timestamp +} + +@test "claude subagents show 
handles non-existent agent" { + run run_intent claude subagents show nonexistent + assert_failure + assert_output_contains "Error: Agent 'nonexistent' not found" +} + +@test "claude subagents show works for both agents" { + # Test both intent and elixir agents exist and can be shown + run run_intent claude subagents show intent + assert_success + assert_output_contains "Agent: intent" + + run run_intent claude subagents show elixir + assert_success + assert_output_contains "Agent: elixir" + assert_output_contains "Elixir code doctor" +} + +# Status command tests +@test "claude subagents status shows no agents when none installed" { + # Clean any existing manifest + rm -rf "$HOME/.intent/agents" 2>/dev/null || true + + run run_intent claude subagents status + assert_success + assert_output_contains "No installed agents found" + assert_output_contains "Use 'intent claude subagents install'" +} + +@test "claude subagents status checks agent integrity" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Status should show OK + run run_intent claude subagents status + assert_success + assert_output_contains "Checking agent status" + assert_output_contains "intent" + assert_output_contains "[OK]" + assert_output_contains "Total: 1" + assert_output_contains "OK: 1" +} + +@test "claude subagents status detects missing agents" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Remove the agent file but keep manifest + rm -f "$HOME/.claude/agents/intent.md" + + # Status should detect missing + run run_intent claude subagents status + assert_failure + assert_output_contains "[MISSING]" + assert_output_contains "Agent file not found" + assert_output_contains "Missing: 1" + assert_output_contains "Run 'intent claude subagents install' to restore missing agents" +} + +@test "claude subagents status detects modified agents" { + # Install an agent + run run_intent claude 
subagents install intent --force + assert_success + + # Modify the agent + echo "# Modified" >> "$HOME/.claude/agents/intent.md" + + # Status should detect modification + run run_intent claude subagents status + assert_success + assert_output_contains "[MODIFIED]" + assert_output_contains "Local changes detected" + assert_output_contains "Modified/Updates: 1" +} + +@test "claude subagents status detects available updates" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Simulate source update + echo "# Update" >> "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + + # Status should detect update available + run run_intent claude subagents status + assert_success + assert_output_contains "[UPDATE]" + assert_output_contains "Update available" + assert_output_contains "Run 'intent claude subagents sync'" + + # Clean up + sed -i.bak '/# Update/d' "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + rm -f "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md.bak" +} + +@test "claude subagents status handles missing Claude directory" { + # Remove .claude directory + rm -rf "$HOME/.claude" + + run run_intent claude subagents status + assert_failure + assert_output_contains "Error: Claude Code not detected" +} + +@test "claude subagents status supports verbose flag" { + # Install an agent + run run_intent claude subagents install intent --force + assert_success + + # Run with verbose + run run_intent claude subagents status --verbose + assert_success + assert_output_contains "Source: global" + assert_output_contains "Installed:" + assert_output_contains "Location: $HOME/.claude/agents/intent.md" +} + +@test "claude subagents status works with multiple agents" { + # Install multiple agents + run run_intent claude subagents install intent elixir --force + assert_success + + # Check status + run run_intent claude subagents status + assert_success + assert_output_contains "Total: 2" + 
assert_output_contains "intent" + assert_output_contains "elixir" + + # Modify one, remove another + echo "# Modified" >> "$HOME/.claude/agents/intent.md" + rm -f "$HOME/.claude/agents/elixir.md" + + # Check mixed status + run run_intent claude subagents status + assert_failure + assert_output_contains "[MODIFIED]" + assert_output_contains "[MISSING]" + assert_output_contains "Modified/Updates: 1" + assert_output_contains "Missing: 1" + refute_output_contains "OK:" +} + +@test "claude subagents status detects outdated manifest" { + # Install agent + run run_intent claude subagents install intent --force + assert_success + + # Manually sync without updating manifest + cp "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" "$HOME/.claude/agents/intent.md" + + # Add a change to source + echo "# Change" >> "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + + # Status should detect update available (since manifest shows old checksum) + run run_intent claude subagents status + assert_success + assert_output_contains "[UPDATE]" + assert_output_contains "Update available" + + # Clean up + sed -i.bak '/# Change/d' "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md" + rm -f "$INTENT_HOME/intent/plugins/claude/subagents/intent/agent.md.bak" +} \ No newline at end of file diff --git a/tests/unit/basic.bats b/tests/unit/basic.bats new file mode 100644 index 0000000..d7d1cf8 --- /dev/null +++ b/tests/unit/basic.bats @@ -0,0 +1,24 @@ +#!/usr/bin/env bats +# Basic tests to verify infrastructure works + +load "../lib/test_helper.bash" + +@test "test helper is loaded" { + # This should pass if test helper is loaded correctly + assert_directory_exists "$INTENT_PROJECT_ROOT" +} + +@test "intent executable exists" { + assert_file_exists "$INTENT_BIN_DIR/intent" +} + +@test "can create test project" { + project_dir=$(create_test_project "Basic Test") + assert_directory_exists "$project_dir" + assert_file_exists "$project_dir/.intent/config.json" +} + 
+@test "run_intent function works" { + run run_intent --version + assert_output_contains "Intent version" +} \ No newline at end of file diff --git a/tests/unit/bl_commands.bats b/tests/unit/bl_commands.bats new file mode 100644 index 0000000..86b685a --- /dev/null +++ b/tests/unit/bl_commands.bats @@ -0,0 +1,531 @@ +#!/usr/bin/env bats +# Tests for intent bl (backlog) commands (v2.1.0) + +load "../lib/test_helper.bash" + +@test "bl shows help" { + project_dir=$(create_test_project "BL Test") + cd "$project_dir" + + run run_intent bl + assert_success + assert_output_contains "Usage: intent backlog" + assert_output_contains "intent bl" +} + +@test "bl list adds --plain automatically" { + project_dir=$(create_test_project "BL List Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +if [[ "$*" == "task list --plain" ]]; then + echo "To Do:" + echo " task-001 - Test task" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl list + assert_success + assert_output_contains "Backlog called with: task list --plain" + assert_output_contains "task-001 - Test task" +} + +@test "bl create validates and creates task" { + project_dir=$(create_test_project "BL Create Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +if [[ "$1" == "task" && "$2" == "create" && "$3" == "ST0014 - Test task" ]]; then + echo "Created task task-001" + echo "File: backlog/tasks/task-001 - ST0014-Test-task.md" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl create ST0014 "Test task" + assert_success + assert_output_contains "Created task task-001" +} + +@test "bl create rejects invalid steel thread ID" { + project_dir=$(create_test_project "BL Invalid ID Test") + cd "$project_dir" + + run run_intent bl create INVALID "Test task" + assert_failure + 
assert_output_contains "Invalid steel thread ID format" +} + +@test "bl board passes through without --plain" { + project_dir=$(create_test_project "BL Board Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +if [[ "$*" == "board" ]]; then + echo "Kanban board displayed" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl board + assert_success + assert_output_contains "Backlog called with: board" + assert_output_contains "Kanban board displayed" +} + +@test "bl init configures backlog for Intent" { + project_dir=$(create_test_project "BL Init Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +if [[ "$1" == "init" ]]; then + mkdir -p backlog + echo "project_name: test" > backlog/config.yml + echo "Backlog initialized" + exit 0 +elif [[ "$1" == "config" && "$2" == "set" ]]; then + echo "Config set: $3 = $4" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl init + assert_success + assert_output_contains "Backlog initialized" + assert_output_contains "Configuring backlog for Intent integration" + assert_output_contains "Backlog configured for local Intent use" +} + +@test "bl task edit passes through correctly" { + project_dir=$(create_test_project "BL Task Edit Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +if [[ "$*" == "task edit task-005 --status Done" ]]; then + echo "Task updated" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl task edit task-005 --status Done + assert_success + assert_output_contains "Backlog called with: task edit task-005 --status Done" + assert_output_contains "Task updated" +} + +@test "bl passes through unknown commands" { + 
project_dir=$(create_test_project "BL Unknown Test") + cd "$project_dir" + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +exit 0 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl decision create "Architecture choice" + assert_success + assert_output_contains "Backlog called with: decision create Architecture choice" +} + +@test "bl requires backlog to be installed" { + project_dir=$(create_test_project "BL No Backlog Test") + cd "$project_dir" + + # Save original PATH + ORIG_PATH="$PATH" + + # Create a temporary directory for our fake commands + FAKE_BIN="$(mktemp -d)" + + # Create fake jq that works (so we get past the jq check) + cat > "$FAKE_BIN/jq" << 'EOF' +#!/bin/bash +# Fake jq that just passes through for our test +exit 0 +EOF + chmod +x "$FAKE_BIN/jq" + + # Set PATH to include fake bin and essential system directories + # This ensures commands like rm, cat, etc. still work + export PATH="$FAKE_BIN:/usr/bin:/bin" + + # Now backlog won't be found, but jq and system commands will work + run run_intent bl list + assert_failure + assert_output_contains "Backlog.md is not installed" + + # Restore PATH and cleanup + export PATH="$ORIG_PATH" + rm -rf "$FAKE_BIN" +} + +@test "bl list respects backlog_list_status from config" { + project_dir=$(create_test_project "BL Status Filter Test") + cd "$project_dir" + + # Set backlog_list_status in config + cat > .intent/config.json << 'EOF' +{ + "version": "2.1.0", + "project_name": "Test Project", + "author": "Test", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_list_status": "todo" +} +EOF + + # Mock backlog command that echoes arguments + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +# Check if -s todo was passed +if [[ "$*" == *"-s todo"* ]]; then + echo "Filtering by status: todo" + echo "todo:" + echo " task-001 - ST0001 - Todo task" +else + echo "No status filter 
applied" + echo "todo:" + echo " task-001 - ST0001 - Todo task" + echo "done:" + echo " task-002 - ST0002 - Done task" +fi +exit 0 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl list + assert_success + assert_output_contains "Backlog called with: task list --plain -s todo" + assert_output_contains "Filtering by status: todo" + assert_output_contains "task-001 - ST0001 - Todo task" + ! assert_output_contains "task-002 - ST0002 - Done task" +} + +@test "bl list --all ignores backlog_list_status" { + project_dir=$(create_test_project "BL All Test") + cd "$project_dir" + + # Set backlog_list_status in config + cat > .intent/config.json << 'EOF' +{ + "version": "2.1.0", + "project_name": "Test Project", + "author": "Test", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_list_status": "todo" +} +EOF + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +# Check if -s flag was NOT passed (meaning show all) +if [[ "$*" != *"-s"* ]]; then + echo "Showing all tasks" + echo "todo:" + echo " task-001 - ST0001 - Todo task" + echo "done:" + echo " task-002 - ST0002 - Done task" +fi +exit 0 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl list --all + assert_success + assert_output_contains "Showing all tasks" + assert_output_contains "task-001 - ST0001 - Todo task" + assert_output_contains "task-002 - ST0002 - Done task" +} + +@test "bl list validates backlog_list_status" { + project_dir=$(create_test_project "BL Invalid Status Test") + cd "$project_dir" + + # Set invalid backlog_list_status in config + cat > .intent/config.json << 'EOF' +{ + "version": "2.1.0", + "project_name": "Test Project", + "author": "Test", + "created": "2025-07-17", + "st_prefix": "ST", + "backlog_list_status": "invalid-status" +} +EOF + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +echo "Backlog called with: $*" +exit 0 +EOF + 
chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl list + assert_success + assert_output_contains "Warning: Invalid backlog_list_status 'invalid-status'" + assert_output_contains "Valid statuses are: todo wip done cancelled archived" +} + +@test "bl task pad requires --size argument" { + project_dir=$(create_test_project "BL Pad Size Test") + cd "$project_dir" + + run run_intent bl task pad task-9 + assert_failure + assert_output_contains "No --size specified and backlog not configured" +} + +@test "bl task pad validates size is numeric" { + project_dir=$(create_test_project "BL Pad Size Numeric Test") + cd "$project_dir" + + run run_intent bl task pad task-9 --size abc + assert_failure + assert_output_contains "Invalid --size value. Must be a positive number" +} + +@test "bl task pad requires task ID or --all" { + project_dir=$(create_test_project "BL Pad Args Test") + cd "$project_dir" + + run run_intent bl task pad --size 3 + assert_failure + assert_output_contains "Must specify either a task ID or --all" +} + +@test "bl task pad rejects both task ID and --all" { + project_dir=$(create_test_project "BL Pad Both Args Test") + cd "$project_dir" + + run run_intent bl task pad task-9 --all --size 3 + assert_failure + assert_output_contains "Cannot specify both a task ID and --all" +} + +@test "bl task pad pads single task correctly" { + project_dir=$(create_test_project "BL Pad Single Test") + cd "$project_dir" + + # Create backlog directory structure + mkdir -p backlog/tasks + + # Create a test task file + cat > "backlog/tasks/task-9 - ST0001-Test-task.md" << 'EOF' +--- +id: task-9 +title: ST0001 - Test task +status: todo +assignee: [] +created_date: '2025-07-23' +updated_date: '2025-07-23' +labels: [] +dependencies: [] +--- + +## Description +Test task +EOF + + run run_intent bl task pad task-9 --size 3 + assert_success + assert_output_contains "Padding tasks to 3 digits..." 
+ assert_output_contains "Padding: task-9 - ST0001-Test-task.md -> task-009 - ST0001-Test-task.md" + assert_output_contains "Successfully padded task" + assert_output_contains "intent bl config set zeroPaddedIds 3" + + # Verify file was renamed + assert_file_exists "backlog/tasks/task-009 - ST0001-Test-task.md" + [ ! -f "backlog/tasks/task-9 - ST0001-Test-task.md" ] + + # Verify ID was updated in file content + run grep "^id: task-009$" "backlog/tasks/task-009 - ST0001-Test-task.md" + assert_success +} + +@test "bl task pad handles already padded tasks" { + project_dir=$(create_test_project "BL Pad Already Padded Test") + cd "$project_dir" + + # Create backlog directory structure + mkdir -p backlog/tasks + + # Create an already padded task file + cat > "backlog/tasks/task-009 - ST0001-Test-task.md" << 'EOF' +--- +id: task-009 +title: ST0001 - Test task +status: todo +assignee: [] +created_date: '2025-07-23' +updated_date: '2025-07-23' +labels: [] +dependencies: [] +--- + +## Description +Test task +EOF + + run run_intent bl task pad task-009 --size 3 + assert_success + assert_output_contains "Task 'task-009' is already padded to 3 digits" +} + +@test "bl task pad --all pads all tasks" { + project_dir=$(create_test_project "BL Pad All Test") + cd "$project_dir" + + # Create backlog directory structure + mkdir -p backlog/tasks + mkdir -p backlog/archive/tasks + + # Create test task files + cat > "backlog/tasks/task-1 - ST0001-First.md" << 'EOF' +--- +id: task-1 +title: ST0001 - First +status: todo +--- +EOF + + cat > "backlog/tasks/task-10 - ST0002-Second.md" << 'EOF' +--- +id: task-10 +title: ST0002 - Second +status: todo +--- +EOF + + cat > "backlog/archive/tasks/task-5 - ST0003-Archived.md" << 'EOF' +--- +id: task-5 +title: ST0003 - Archived +status: archived +--- +EOF + + run run_intent bl task pad --all --size 3 + assert_success + assert_output_contains "Padding tasks to 3 digits..." 
+ assert_output_contains "Processed backlog/tasks/: 2 files updated, 0 already padded" + assert_output_contains "Processed backlog/archive/tasks/: 1 files updated, 0 already padded" + assert_output_contains "Total: 3 tasks updated" + + # Verify files were renamed + assert_file_exists "backlog/tasks/task-001 - ST0001-First.md" + assert_file_exists "backlog/tasks/task-010 - ST0002-Second.md" + assert_file_exists "backlog/archive/tasks/task-005 - ST0003-Archived.md" + + # Verify IDs were updated + run grep "^id: task-001$" "backlog/tasks/task-001 - ST0001-First.md" + assert_success + run grep "^id: task-010$" "backlog/tasks/task-010 - ST0002-Second.md" + assert_success + run grep "^id: task-005$" "backlog/archive/tasks/task-005 - ST0003-Archived.md" + assert_success +} + +@test "bl task pad handles non-existent task" { + project_dir=$(create_test_project "BL Pad Not Found Test") + cd "$project_dir" + + # Create empty backlog directory + mkdir -p backlog/tasks + + run run_intent bl task pad task-999 --size 3 + assert_failure + assert_output_contains "Error: Task 'task-999' not found" +} + +@test "bl task pad uses configured size when no --size provided" { + project_dir=$(create_test_project "BL Pad Config Size Test") + cd "$project_dir" + + # Create backlog directory and config + mkdir -p backlog/tasks + cat > backlog/config.yml << 'EOF' +zeroPaddedIds: 2 +EOF + + # Create a test task file + cat > "backlog/tasks/task-5 - ST0001-Test.md" << 'EOF' +--- +id: task-5 +title: ST0001 - Test +status: todo +--- +EOF + + # Mock backlog config get command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +if [[ "$1" == "config" && "$2" == "get" && "$3" == "zeroPaddedIds" ]]; then + echo "2" + exit 0 +fi +echo "Unknown command" +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent bl task pad --all + assert_success + assert_output_contains "Using configured zero padding size: 2" + assert_output_contains "Padding tasks to 2 digits..." 
+ + # Verify file was renamed + assert_file_exists "backlog/tasks/task-05 - ST0001-Test.md" +} \ No newline at end of file diff --git a/tests/unit/bootstrap.bats b/tests/unit/bootstrap.bats new file mode 100644 index 0000000..23999bf --- /dev/null +++ b/tests/unit/bootstrap.bats @@ -0,0 +1,95 @@ +#!/usr/bin/env bats +# Tests for the intent_bootstrap command + +load "../lib/test_helper.bash" + +# Override setup to handle HOME directory for bootstrap tests +setup() { + # First call parent setup to create TEST_TEMP_DIR + TEST_TEMP_DIR="$(mktemp -d /tmp/intent-test-XXXXXX)" + cd "${TEST_TEMP_DIR}" || exit 1 + + # Backup real home config if it exists + if [ -d "$HOME/.config/intent" ]; then + export BACKUP_CONFIG=true + export BACKUP_DIR="$HOME/.config/intent.bak.$$" + mv "$HOME/.config/intent" "$BACKUP_DIR" + fi + + # Set test HOME + export ORIG_HOME="$HOME" + export HOME="${TEST_TEMP_DIR}/home" + mkdir -p "$HOME" + mkdir -p "$HOME/.config" +} + +# Clean up after each test +teardown() { + # Restore original HOME + export HOME="$ORIG_HOME" + + # Restore backed up config + if [ "$BACKUP_CONFIG" = true ]; then + rm -rf "$ORIG_HOME/.config/intent" + if [ -d "$BACKUP_DIR" ]; then + mv "$BACKUP_DIR" "$ORIG_HOME/.config/intent" + fi + fi +} + +@test "intent_bootstrap creates global config directory" { + # Verify directory doesn't exist yet + [ ! 
-d "$HOME/.config/intent" ] + + # Run bootstrap (non-interactive mode) + run run_intent bootstrap --quiet + assert_success + + # Verify directory was created + assert_directory_exists "$HOME/.config/intent" +} + +@test "intent_bootstrap creates default config.json" { + # Run bootstrap + run run_intent bootstrap --quiet + assert_success + + # Verify config was created with correct content + assert_file_exists "$HOME/.config/intent/config.json" + assert_file_contains "$HOME/.config/intent/config.json" '"intent_version": "2.1.0"' + assert_file_contains "$HOME/.config/intent/config.json" '"intent_dir": "intent"' + assert_file_contains "$HOME/.config/intent/config.json" '"backlog_dir": "backlog"' +} + +@test "intent_bootstrap provides PATH setup instructions" { + # Don't use --quiet so we see the instructions + run run_intent bootstrap + assert_success + + # Should show PATH instructions or already configured message + # (Bootstrap output varies based on configuration state) + [ -n "$output" ] || fail "Expected output from bootstrap" +} + +@test "intent_bootstrap handles existing config gracefully" { + # Create existing config + mkdir -p "$HOME/.config/intent" + echo '{"intent_version": "1.0.0", "custom": "value"}' > "$HOME/.config/intent/config.json" + + # Run bootstrap - should not overwrite + run run_intent bootstrap --quiet + assert_success + + # Original config should be preserved + assert_file_contains "$HOME/.config/intent/config.json" '"intent_version": "1.0.0"' + assert_file_contains "$HOME/.config/intent/config.json" '"custom": "value"' +} + +@test "intent_bootstrap runs doctor after setup" { + run run_intent bootstrap --quiet + assert_success + + # Should see doctor output at the end + assert_output_contains "Intent Doctor" + assert_output_contains "Checking INTENT_HOME" +} \ No newline at end of file diff --git a/tests/unit/config.bats b/tests/unit/config.bats new file mode 100644 index 0000000..5e9e634 --- /dev/null +++ b/tests/unit/config.bats @@ -0,0 
+1,74 @@ +#!/usr/bin/env bats +# Test configuration loading and PROJECT_ROOT detection + +load "../lib/test_helper.bash" + +@test "PROJECT_ROOT is detected correctly" { + # Create nested directory structure + project_dir=$(create_test_project "Root Project") + mkdir -p "$project_dir/subdir/deeper" + + # From project root + cd "$project_dir" + run run_intent info + assert_success + assert_output_contains "Location: $project_dir" + + # From subdirectory + cd "$project_dir/subdir" + run run_intent info + assert_success + assert_output_contains "Location: $project_dir" + + # From deeper subdirectory + cd "$project_dir/subdir/deeper" + run run_intent info + assert_success + assert_output_contains "Location: $project_dir" +} + +@test "config.json is loaded correctly" { + project_dir=$(create_test_project "Config Test Project") + cd "$project_dir" + + # Update config with custom values + cat > ".intent/config.json" << EOF +{ + "intent_version": "2.0.0", + "project_name": "Custom Project Name", + "author": "custom_author", + "created_date": "2025-01-15T10:00:00Z" +} +EOF + + run run_intent info + assert_success + assert_output_contains "Name: Custom Project Name" + assert_output_contains "Author: custom_author" +} + +@test "legacy stp structure is detected" { + # Create legacy structure in a test directory + legacy_dir="${TEST_TEMP_DIR}/legacy_project" + mkdir -p "$legacy_dir/stp/prj/st" + mkdir -p "$legacy_dir/stp/.config" + echo "stp_version: 2.0.0" > "$legacy_dir/stp/.config/version" + + cd "$legacy_dir" + + # Intent should detect this as a legacy project + run run_intent doctor + assert_success + # Should show it found a project needing upgrade + assert_output_contains "Intent Doctor" +} + +@test "missing config shows appropriate error" { + # Create directory without .intent/config.json + mkdir empty_dir + cd empty_dir + + run run_intent st list + assert_failure + assert_output_contains "Not in an Intent project directory" +} \ No newline at end of file diff --git 
a/tests/unit/fileindex_commands.bats b/tests/unit/fileindex_commands.bats new file mode 100644 index 0000000..40f2b44 --- /dev/null +++ b/tests/unit/fileindex_commands.bats @@ -0,0 +1,631 @@ +#!/usr/bin/env bats +# Test suite for intent_fileindex command + +load ../lib/test_helper + +# Test basic functionality +@test "fileindex: shows help with -h flag" { + run "${INTENT_BIN_DIR}/intent_fileindex" -h + assert_failure + assert_output_contains "Usage: intent_fileindex [OPTIONS] [STARTDIR] [FILESPEC]" + assert_output_contains "Create and manage file indexes with checkbox states" +} + +@test "fileindex: shows help with --help flag" { + run "${INTENT_BIN_DIR}/intent_fileindex" --help + assert_failure + assert_output_contains "Usage: intent_fileindex [OPTIONS] [STARTDIR] [FILESPEC]" +} + +# Test standalone mode +@test "fileindex: lists files in current directory (standalone mode)" { + # Create test files + touch test1.ex test2.exs test3.txt + + run "${INTENT_BIN_DIR}/intent_fileindex" + assert_success + assert_output_contains "[ ] ./test1.ex" + assert_output_contains "[ ] ./test2.exs" + refute_output_contains "test3.txt" +} + +@test "fileindex: respects custom filespec" { + # Create test files + touch test1.py test2.py test3.rb + + run "${INTENT_BIN_DIR}/intent_fileindex" . 
"*.py" + assert_success + assert_output_contains "[ ] ./test1.py" + assert_output_contains "[ ] ./test2.py" + refute_output_contains "test3.rb" +} + +@test "fileindex: recursive search with -r flag" { + # Create nested structure + mkdir -p subdir/nested + touch file1.ex subdir/file2.ex subdir/nested/file3.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -r + assert_success + assert_output_contains "[ ] ./file1.ex" + assert_output_contains "[ ] ./subdir/file2.ex" + assert_output_contains "[ ] ./subdir/nested/file3.ex" +} + +@test "fileindex: non-recursive by default" { + # Create nested structure + mkdir -p subdir + touch file1.ex subdir/file2.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" + assert_success + assert_output_contains "[ ] ./file1.ex" + refute_output_contains "subdir/file2.ex" +} + +# Test output options +@test "fileindex: output to file with -f" { + touch test1.ex test2.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -f output.txt + assert_success + assert_file_exists output.txt + assert_file_contains output.txt "[ ] ./test1.ex" + assert_file_contains output.txt "[ ] ./test2.ex" +} + +@test "fileindex: output to file with --file" { + touch test1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" --file output2.txt + assert_success + assert_file_exists output2.txt + assert_file_contains output2.txt "[ ] ./test1.ex" +} + +# Test index file functionality +@test "fileindex: creates index file with -i" { + touch file1.ex file2.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + assert_file_exists test.index + + # Check JSON header + assert_file_contains test.index '"generator": "intent-fileindex"' + assert_file_contains test.index '"context": "standalone"' + + # Check file entries + assert_file_contains test.index "[ ] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: preserves checkbox states in index" { + touch file1.ex file2.ex + + # Create initial index + run 
"${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Manually mark one file as checked + sed -i.bak 's/\[ \] \.\/file1\.ex/[x] .\/file1.ex/' test.index + + # Run again + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Verify state was preserved + assert_file_contains test.index "[x] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: adds new files to existing index" { + touch file1.ex + + # Create initial index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + assert_file_contains test.index "[ ] ./file1.ex" + + # Add new file + touch file2.ex + + # Run again + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Both files should be present + assert_file_contains test.index "[ ] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: removes deleted files from index" { + touch file1.ex file2.ex + + # Create initial index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Mark one as checked + sed -i.bak 's/\[ \] \.\/file2\.ex/[x] .\/file2.ex/' test.index + + # Remove a file + rm file1.ex + + # Run again + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Only file2 should remain + refute_output_contains "file1.ex" + assert_file_contains test.index "[x] ./file2.ex" +} + +# Test verbose mode +@test "fileindex: verbose mode shows processing details" { + touch file1.ex file2.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -v + assert_success + assert_output_contains "Processing:" + assert_output_contains "Summary: Processed 2 files" +} + +@test "fileindex: verbose mode with index file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -v -i test.index + assert_success + assert_output_contains "Index updated: ./test.index" +} + +# Test Intent project integration +@test "fileindex: detects Intent project and uses lib/ default" { + # 
Create test Intent project + local project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + # Create lib directory with files + mkdir -p lib src + touch lib/app.ex lib/server.ex src/other.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" + assert_success + assert_output_contains "[ ] lib/app.ex" + assert_output_contains "[ ] lib/server.ex" + refute_output_contains "src/other.ex" +} + +@test "fileindex: uses .intent/indexes/ for index files in Intent project" { + local project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + mkdir -p lib + touch lib/app.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i myproject.index + assert_success + assert_file_exists ".intent/indexes/myproject.index" + assert_file_contains ".intent/indexes/myproject.index" '"context": "intent_project"' +} + +@test "fileindex: --no-intent flag disables Intent integration" { + local project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + touch file1.ex + mkdir -p lib + touch lib/app.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" --no-intent + assert_success + assert_output_contains "[ ] ./file1.ex" + refute_output_contains "lib/app.ex" +} + +@test "fileindex: --intent-dir specifies Intent project" { + # Create Intent project in different location + local project_dir=$(create_test_project "Test Project" "$TEST_TEMP_DIR/other-project") + + # Run from outside the project + mkdir -p "$TEST_TEMP_DIR/work" + cd "$TEST_TEMP_DIR/work" + + mkdir -p "$project_dir/lib" + touch "$project_dir/lib/app.ex" + + # Need to cd to project directory since script looks for files relative to pwd + cd "$project_dir" + + run "${INTENT_BIN_DIR}/intent_fileindex" --intent-dir "$project_dir" + assert_success + assert_output_contains "[ ] lib/app.ex" +} + +@test "fileindex: --index-dir overrides default index directory" { + touch file1.ex + mkdir -p indexes + + run "${INTENT_BIN_DIR}/intent_fileindex" --index-dir indexes -i test.index + assert_success + assert_file_exists 
"indexes/test.index" +} + +# Test error conditions +@test "fileindex: handles non-existent directory" { + run "${INTENT_BIN_DIR}/intent_fileindex" nonexistent + assert_failure + assert_output_contains "Error: Directory 'nonexistent' does not exist" +} + +@test "fileindex: handles empty directory" { + mkdir empty + + run "${INTENT_BIN_DIR}/intent_fileindex" empty + assert_success + # Should complete without error but with no output +} + +# Test complex scenarios +@test "fileindex: handles files with spaces in names" { + touch "file with spaces.ex" + + run "${INTENT_BIN_DIR}/intent_fileindex" + assert_success + assert_output_contains '[ ] ./file with spaces.ex' +} + +@test "fileindex: sorts files consistently" { + touch z.ex a.ex m.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" + assert_success + + # Extract just the filenames and check order + echo "$output" | grep -E "^\[.\]" > actual_order.txt + assert_file_contains actual_order.txt "[ ] ./a.ex" + assert_file_contains actual_order.txt "[ ] ./m.ex" + assert_file_contains actual_order.txt "[ ] ./z.ex" + + # Verify a.ex comes before z.ex in the output + local a_line=$(grep -n "a.ex" actual_order.txt | cut -d: -f1) + local z_line=$(grep -n "z.ex" actual_order.txt | cut -d: -f1) + [ "$a_line" -lt "$z_line" ] || fail "Files not sorted correctly" +} + +@test "fileindex: handles mixed file extensions correctly" { + touch app.ex app.exs config.json + + run "${INTENT_BIN_DIR}/intent_fileindex" . 
"*.{ex,exs}" + assert_success + assert_output_contains "[ ] ./app.ex" + assert_output_contains "[ ] ./app.exs" + refute_output_contains "config.json" +} + +# Test integration with intent command +@test "fileindex: works through intent command" { + touch test.ex + + run run_intent fileindex + assert_success + assert_output_contains "[ ] ./test.ex" +} + +@test "fileindex: help available through intent help" { + run run_intent help fileindex + assert_success + assert_output_contains "Create and manage file indexes with checkbox states" + assert_output_contains "@usage:" + assert_output_contains "intent fileindex [OPTIONS]" +} + +# Toggle functionality tests +@test "fileindex: toggle file state from unchecked to checked" { + touch file1.ex file2.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Toggle file1.ex + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -X ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + + # Verify state persisted + assert_file_contains test.index "[x] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: toggle file state from checked to unchecked" { + touch file1.ex + + # Create index and manually mark as checked + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + sed -i.bak 's/\[ \] \.\/file1\.ex/[x] .\/file1.ex/' test.index + + # Toggle back to unchecked + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -X ./file1.ex + assert_success + assert_output "[ ] ./file1.ex" +} + +@test "fileindex: toggle with --toggle flag" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index --toggle ./file1.ex + assert_success + assert_output "[x] ./file1.ex" +} + +@test "fileindex: toggle requires index file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -X ./file1.ex + assert_failure + assert_output_contains 
"Error: Toggle mode requires an index file" +} + +@test "fileindex: toggle with non-existent file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -X ./nonexistent.ex + assert_failure + assert_output_contains "Error: File './nonexistent.ex' not found in index" +} + +@test "fileindex: toggle with non-existent index" { + run "${INTENT_BIN_DIR}/intent_fileindex" -i nonexistent.index -X ./file.ex + assert_failure + assert_output_contains "Error: Index file" + assert_output_contains "does not exist" +} + +@test "fileindex: toggle preserves file order in index" { + touch a.ex b.ex z.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Toggle middle file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -X ./b.ex + assert_success + + # Check order is preserved + grep -E "^\[.\]" test.index > actual_order.txt + assert_file_contains actual_order.txt "[ ] ./a.ex" + assert_file_contains actual_order.txt "[x] ./b.ex" + assert_file_contains actual_order.txt "[ ] ./z.ex" + + # Verify order + local line1=$(grep -n "a.ex" actual_order.txt | cut -d: -f1) + local line2=$(grep -n "b.ex" actual_order.txt | cut -d: -f1) + local line3=$(grep -n "z.ex" actual_order.txt | cut -d: -f1) + [ "$line1" -lt "$line2" ] || fail "File order not preserved" + [ "$line2" -lt "$line3" ] || fail "File order not preserved" +} + +# Check functionality tests +@test "fileindex: check file from unchecked to checked" { + touch file1.ex file2.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Check file1.ex + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + + # Verify state persisted + assert_file_contains test.index "[x] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: check already checked 
file remains checked" { + touch file1.ex + + # Create index and manually mark as checked + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + sed -i.bak 's/\[ \] \.\/file1\.ex/[x] .\/file1.ex/' test.index + + # Check already checked file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + + # Verify still checked + assert_file_contains test.index "[x] ./file1.ex" +} + +@test "fileindex: check with --check flag" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index --check ./file1.ex + assert_success + assert_output "[x] ./file1.ex" +} + +@test "fileindex: check requires index file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -C ./file1.ex + assert_failure + assert_output_contains "Error: Check mode requires an index file" +} + +@test "fileindex: check with non-existent file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./nonexistent.ex + assert_failure + assert_output_contains "Error: File './nonexistent.ex' not found in index" +} + +@test "fileindex: check with non-existent index" { + run "${INTENT_BIN_DIR}/intent_fileindex" -i nonexistent.index -C ./file.ex + assert_failure + assert_output_contains "Error: Index file" + assert_output_contains "does not exist" +} + +# Uncheck functionality tests +@test "fileindex: uncheck file from checked to unchecked" { + touch file1.ex file2.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Manually mark file1 as checked + sed -i.bak 's/\[ \] \.\/file1\.ex/[x] .\/file1.ex/' test.index + + # Uncheck file1.ex + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -U ./file1.ex + assert_success + assert_output "[ ] ./file1.ex" + + # Verify state persisted + 
assert_file_contains test.index "[ ] ./file1.ex" + assert_file_contains test.index "[ ] ./file2.ex" +} + +@test "fileindex: uncheck already unchecked file remains unchecked" { + touch file1.ex + + # Create index (files are unchecked by default) + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Uncheck already unchecked file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -U ./file1.ex + assert_success + assert_output "[ ] ./file1.ex" + + # Verify still unchecked + assert_file_contains test.index "[ ] ./file1.ex" +} + +@test "fileindex: uncheck with --uncheck flag" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Mark as checked first + sed -i.bak 's/\[ \] \.\/file1\.ex/[x] .\/file1.ex/' test.index + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index --uncheck ./file1.ex + assert_success + assert_output "[ ] ./file1.ex" +} + +@test "fileindex: uncheck requires index file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -U ./file1.ex + assert_failure + assert_output_contains "Error: Uncheck mode requires an index file" +} + +@test "fileindex: uncheck with non-existent file" { + touch file1.ex + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -U ./nonexistent.ex + assert_failure + assert_output_contains "Error: File './nonexistent.ex' not found in index" +} + +@test "fileindex: uncheck with non-existent index" { + run "${INTENT_BIN_DIR}/intent_fileindex" -i nonexistent.index -U ./file.ex + assert_failure + assert_output_contains "Error: Index file" + assert_output_contains "does not exist" +} + +# Combined tests +@test "fileindex: check and uncheck preserve file order" { + touch a.ex b.ex c.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + + # Check middle file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./b.ex 
+ assert_success + + # Check first file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./a.ex + assert_success + + # Uncheck middle file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -U ./b.ex + assert_success + + # Check order is preserved + grep -E "^\[.\]" test.index > actual_order.txt + assert_file_contains actual_order.txt "[x] ./a.ex" + assert_file_contains actual_order.txt "[ ] ./b.ex" + assert_file_contains actual_order.txt "[ ] ./c.ex" + + # Verify order + local line1=$(grep -n "a.ex" actual_order.txt | cut -d: -f1) + local line2=$(grep -n "b.ex" actual_order.txt | cut -d: -f1) + local line3=$(grep -n "c.ex" actual_order.txt | cut -d: -f1) + [ "$line1" -lt "$line2" ] || fail "File order not preserved" + [ "$line2" -lt "$line3" ] || fail "File order not preserved" +} + +@test "fileindex: sequential check, uncheck, and toggle operations" { + touch file1.ex + + # Create index + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index + assert_success + assert_file_contains test.index "[ ] ./file1.ex" + + # Check the file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + assert_file_contains test.index "[x] ./file1.ex" + + # Uncheck the file + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -U ./file1.ex + assert_success + assert_output "[ ] ./file1.ex" + assert_file_contains test.index "[ ] ./file1.ex" + + # Toggle the file (should become checked) + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -X ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + assert_file_contains test.index "[x] ./file1.ex" + + # Check the file (should remain checked) + run "${INTENT_BIN_DIR}/intent_fileindex" -i test.index -C ./file1.ex + assert_success + assert_output "[x] ./file1.ex" + assert_file_contains test.index "[x] ./file1.ex" +} \ No newline at end of file diff --git a/tests/unit/global_commands.bats b/tests/unit/global_commands.bats new file mode 100644 
index 0000000..783614b --- /dev/null +++ b/tests/unit/global_commands.bats @@ -0,0 +1,72 @@ +#!/usr/bin/env bats +# Test global commands that should work without a project context + +load "../lib/test_helper.bash" + +@test "intent with no args shows info" { + run run_intent + assert_success + assert_output_contains "Intent: The Steel Thread Process" + assert_output_contains "Installation:" + assert_output_contains "INTENT_HOME:" +} + +@test "intent help works anywhere" { + local version=$(get_intent_version) + run run_intent help + assert_success + assert_output_contains "Intent v${version} - Structured Development Process" + assert_output_contains "Usage: intent <command>" +} + +@test "intent doctor works anywhere" { + local version=$(get_intent_version) + run run_intent doctor + assert_success + assert_output_contains "Intent Doctor v${version}" + assert_output_contains "Checking INTENT_HOME" +} + +@test "intent info works anywhere" { + run run_intent info + assert_success + assert_output_contains "Intent: The Steel Thread Process" + assert_output_contains "Installation:" +} + +@test "intent version works anywhere" { + local version=$(get_intent_version) + run run_intent version + assert_success + assert_output_contains "Intent version ${version}" +} + +@test "intent --version works anywhere" { + local version=$(get_intent_version) + run run_intent --version + assert_success + assert_output_contains "Intent version ${version}" +} + +@test "intent bootstrap works anywhere" { + # Just check it runs without error - don't actually bootstrap + run run_intent bootstrap --help + assert_success + assert_output_contains "bootstrap" +} + +@test "intent init works in empty directory" { + local version=$(get_intent_version) + # Don't actually run init, just check help + run run_intent init --help + # Note: init --help exits with status 1 + assert_failure + assert_output_contains "Initialize a new Intent v${version} project" +} + +@test "intent handles unknown command 
gracefully" { + run run_intent nonexistentcommand + assert_failure + assert_output_contains "Unknown command 'nonexistentcommand'" + assert_output_contains "Run 'intent help' for usage" +} \ No newline at end of file diff --git a/tests/unit/help_commands.bats b/tests/unit/help_commands.bats new file mode 100644 index 0000000..c8689ca --- /dev/null +++ b/tests/unit/help_commands.bats @@ -0,0 +1,61 @@ +#!/usr/bin/env bats +# Tests for intent help commands (v2.0.0) + +load "../lib/test_helper.bash" + +@test "help displays general help when no command is specified" { + local version=$(get_intent_version) + # Help is a global command - doesn't need project context + run run_intent help + assert_success + assert_output_contains "Intent v${version} - Structured Development Process" + assert_output_contains "Usage:" + assert_output_contains "Core:" + assert_output_contains "st" + assert_output_contains "bl" + assert_output_contains "task" +} + +@test "help displays command-specific help when a command is specified" { + run run_intent help st + assert_success + assert_output_contains "No help available for command 'st'" + assert_output_contains "intent st --help" +} + +@test "help shows short descriptions for all commands" { + run run_intent help + assert_success + + # Check for core commands + assert_output_contains "st" + assert_output_contains "bl" + assert_output_contains "task" + assert_output_contains "init" + assert_output_contains "doctor" +} + +@test "help handles unknown commands correctly" { + run run_intent help unknown_command + assert_failure + assert_output_contains "Unknown command 'unknown_command'" +} + +@test "help works with --help flag" { + run run_intent help --help + assert_failure + assert_output_contains "Unknown command '--help'" +} + +@test "help shows proper command categories" { + run run_intent help + assert_success + + # Check for category headers + assert_output_contains "Core:" + assert_output_contains "Utility:" + + # Verify commands exist 
+ assert_output_contains "st" + assert_output_contains "bl" +} \ No newline at end of file diff --git a/tests/unit/init_commands.bats b/tests/unit/init_commands.bats new file mode 100644 index 0000000..a49097f --- /dev/null +++ b/tests/unit/init_commands.bats @@ -0,0 +1,158 @@ +#!/usr/bin/env bats +# Tests for intent init commands (v2.1.0) + +load "../lib/test_helper.bash" + +@test "init uses directory name if no project name given" { + # Create temporary directory for init + test_dir=$(mktemp -d) + cd "$test_dir" + + run run_intent init + assert_success + assert_directory_exists ".intent" + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + +@test "init creates a project in the current directory by default" { + # Create temporary directory for init + test_dir=$(mktemp -d) + cd "$test_dir" + + run run_intent init "Test Project" + assert_success + + # Check if project structure was created + assert_directory_exists ".intent" + assert_directory_exists "intent" + assert_directory_exists "intent/st" + assert_directory_exists "intent/docs" + assert_directory_exists "intent/llm" + assert_file_exists ".intent/config.json" + + # Check config content + assert_file_contains ".intent/config.json" '"project_name": "Test Project"' + assert_file_contains ".intent/config.json" '"version": "2.1.0"' + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + +@test "init creates a project in a specified directory" { + # Create temporary directory + test_dir=$(mktemp -d) + target_dir="$test_dir/my-project" + + mkdir -p "$target_dir" + cd "$target_dir" + run run_intent init "Test Project" + assert_success + + # Check if project was created in specified directory + assert_directory_exists "$target_dir/.intent" + assert_directory_exists "$target_dir/intent" + assert_directory_exists "$target_dir/intent/st" + assert_file_exists "$target_dir/.intent/config.json" + + # Cleanup + rm -rf "$test_dir" +} + +@test "init creates proper configuration file" { + # Create temporary directory + 
test_dir=$(mktemp -d) + cd "$test_dir" + + run run_intent init "My Test Project" + assert_success + + # Check configuration content + assert_file_contains ".intent/config.json" '"project_name": "My Test Project"' + assert_file_contains ".intent/config.json" '"version": "2.1.0"' + assert_file_contains ".intent/config.json" '"created":' + assert_file_contains ".intent/config.json" '"author":' + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + +@test "init creates required project files" { + # Create temporary directory + test_dir=$(mktemp -d) + cd "$test_dir" + + run run_intent init "Test Project" + assert_success + + # Check for essential files + assert_file_exists "CLAUDE.md" + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + +@test "init fails on existing Intent project" { + # Create temporary directory with existing project + test_dir=$(mktemp -d) + cd "$test_dir" + + # Create existing project + mkdir -p .intent + echo '{"name": "Existing"}' > .intent/config.json + + run run_intent init "New Project" + assert_failure + assert_output_contains "already an Intent project" + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + + +@test "init creates proper directory permissions" { + # Create temporary directory + test_dir=$(mktemp -d) + cd "$test_dir" + + run run_intent init "Test Project" + assert_success + + # Check directory permissions (should be readable/writable/executable by owner) + [ -r ".intent" ] || fail ".intent not readable" + [ -w ".intent" ] || fail ".intent not writable" + [ -x ".intent" ] || fail ".intent not executable" + + [ -r "intent" ] || fail "intent not readable" + [ -w "intent" ] || fail "intent not writable" + [ -x "intent" ] || fail "intent not executable" + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} + +@test "init respects author from git config" { + # Create temporary directory + test_dir=$(mktemp -d) + cd "$test_dir" + + # Set up author via environment + export INTENT_AUTHOR="Test Author" + + run 
run_intent init "Test Project" + assert_success + + # Check if author was set via the INTENT_AUTHOR environment variable + assert_file_contains ".intent/config.json" '"author": "Test Author"' + + # Cleanup + cd - > /dev/null + rm -rf "$test_dir" +} \ No newline at end of file diff --git a/tests/unit/migration.bats b/tests/unit/migration.bats new file mode 100644 index 0000000..7f979d3 --- /dev/null +++ b/tests/unit/migration.bats @@ -0,0 +1,73 @@ +#!/usr/bin/env bats +# Test migration and backup functionality + +load "../lib/test_helper.bash" + +@test "backup directory uses .backup_ prefix" { + project_dir=$(create_test_project "Backup Test") + cd "$project_dir" + + # Create some content to backup + mkdir -p "intent/st/ST0001" + echo "test content" > "intent/st/ST0001/info.md" + + # Source helpers to test backup function + source "${INTENT_BIN_DIR}/intent_helpers" + + # Create backup + create_project_backup "$project_dir" + + # Check that backup was created with correct prefix + backup_dirs=(.backup_*) + assert_directory_exists "${backup_dirs[0]}" + + # Verify it's not using old .stp_backup_ prefix + if ls .stp_backup_* 2>/dev/null; then + fail "Found old .stp_backup_ directory, should use .backup_" + fi +} + +@test "gitignore contains .backup_* pattern" { + project_dir=$(create_test_project "Gitignore Test") + cd "$project_dir" + + # Create a gitignore with the helpers function + source "${INTENT_BIN_DIR}/intent_helpers" + create_v2_directory_structure "$project_dir" + + # Check gitignore contains new pattern + assert_file_exists ".gitignore" + assert_file_contains ".gitignore" ".backup_*" + + # Verify old pattern is not present + if grep -q ".stp_backup_" ".gitignore"; then + fail ".gitignore contains old .stp_backup_ pattern" + fi +} + +@test "intent_version is used in frontmatter" { + project_dir=$(create_test_project "Version Test") + cd "$project_dir" + + # Create a steel thread manually to test frontmatter + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +id: 
ST0001 +title: Test Thread +status: In Progress +created: 2025-01-01 +author: test_user +intent_version: 2.0.0 +--- + +# Test Thread +EOF + + # Check it uses intent_version, not stp_version + assert_file_contains "intent/st/ST0001/info.md" "intent_version:" + + if grep -q "stp_version:" "intent/st/ST0001/info.md"; then + fail "Steel thread contains old stp_version in frontmatter" + fi +} \ No newline at end of file diff --git a/tests/unit/project_commands.bats b/tests/unit/project_commands.bats new file mode 100644 index 0000000..ea60e5e --- /dev/null +++ b/tests/unit/project_commands.bats @@ -0,0 +1,87 @@ +#!/usr/bin/env bats +# Test project commands that require a project context + +load "../lib/test_helper.bash" + +@test "intent st requires project - shows error outside project" { + # Run from temp directory without project + # Use 'list' subcommand to trigger project check + run run_intent st list + assert_failure + assert_output_contains "Not in an Intent project directory" + assert_output_contains "The 'st' command requires an Intent project" + assert_output_contains "To create a new project: intent init" +} + +@test "intent bl requires project - shows error outside project" { + # Use 'list' subcommand to trigger project check + run run_intent bl list + assert_failure + assert_output_contains "Not in an Intent project directory" + assert_output_contains "The 'bl' command requires an Intent project" +} + +@test "intent task requires project - shows error outside project" { + # Note: intent task with no args shows usage (exit 0) + # Test with an actual subcommand to trigger project check + run run_intent task list ST0001 + assert_failure + assert_output_contains "Not in an Intent project directory" + assert_output_contains "The 'task' command requires an Intent project" +} + +@test "intent st list works inside project" { + # Create test project + project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + # Create a test steel thread + mkdir -p 
"intent/st/ST0001" + cat > "intent/st/ST0001/info.md" << EOF +--- +id: ST0001 +title: Test Steel Thread +status: In Progress +created: 2025-01-01 +author: test_user +--- + +# ST0001: Test Steel Thread + +Test description +EOF + + run run_intent st list + assert_success + assert_output_contains "ST0001" + assert_output_contains "Test Steel Thread" +} + +@test "intent bl works inside project with backlog" { + # Create test project + project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + # Create dummy backlog + touch "backlog/Backlog.md" + + run run_intent bl --help + assert_success + assert_output_contains "backlog" +} + +@test "intent info shows project details when inside project" { + # Create test project + project_dir=$(create_test_project "Test Project") + cd "$project_dir" + + # Create some steel threads + mkdir -p "intent/st/ST0001" + mkdir -p "intent/st/COMPLETED/ST0002" + + run run_intent info + assert_success + assert_output_contains "Project:" + assert_output_contains "Name: Test Project" + assert_output_contains "Steel Threads:" +} \ No newline at end of file diff --git a/tests/unit/st_commands.bats b/tests/unit/st_commands.bats new file mode 100644 index 0000000..2e1a63a --- /dev/null +++ b/tests/unit/st_commands.bats @@ -0,0 +1,805 @@ +#!/usr/bin/env bats +# Tests for intent st commands (v2.0.0) + +load "../lib/test_helper.bash" + +@test "st requires a command" { + project_dir=$(create_test_project "ST Test") + cd "$project_dir" + + run run_intent st + assert_failure + assert_output_contains "Steel thread command is required" +} + +@test "st new creates a new steel thread" { + project_dir=$(create_test_project "ST New Test") + cd "$project_dir" + + # Set EDITOR to avoid interactive prompt + export EDITOR=echo + + run run_intent st new "Test Steel Thread" + assert_success + + # Check if steel thread directory was created + # New threads start in NOT-STARTED subdirectory + assert_directory_exists "intent/st/NOT-STARTED/ST0001" + 
assert_file_exists "intent/st/NOT-STARTED/ST0001/info.md" + assert_file_contains "intent/st/NOT-STARTED/ST0001/info.md" "Test Steel Thread" +} + +@test "st new creates sequential steel thread IDs" { + project_dir=$(create_test_project "ST Sequential Test") + cd "$project_dir" + + export EDITOR=echo + + # Create first steel thread + run run_intent st new "First Steel Thread" + assert_success + assert_directory_exists "intent/st/NOT-STARTED/ST0001" + + # Create second steel thread + run run_intent st new "Second Steel Thread" + assert_success + assert_directory_exists "intent/st/NOT-STARTED/ST0002" + + # Create third steel thread + run run_intent st new "Third Steel Thread" + assert_success + assert_directory_exists "intent/st/NOT-STARTED/ST0003" +} + + + +@test "st list shows only in-progress threads by default" { + project_dir=$(create_test_project "ST List Test") + cd "$project_dir" + + # Create steel threads with different statuses + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +id: ST0001 +title: First Steel Thread +status: In Progress +created: 2025-01-01 +author: test_user +intent_version: 2.0.0 +--- +EOF + + mkdir -p intent/st/ST0002 + cat > intent/st/ST0002/info.md << EOF +--- +id: ST0002 +title: Second Steel Thread +status: In Progress +created: 2025-01-02 +author: test_user +intent_version: 2.0.0 +--- +EOF + + mkdir -p intent/st/COMPLETED/ST0003 + cat > intent/st/COMPLETED/ST0003/info.md << EOF +--- +id: ST0003 +title: Third Steel Thread +status: Completed +created: 2025-01-03 +completed: 2025-01-04 +author: test_user +intent_version: 2.0.0 +--- +EOF + + mkdir -p intent/st/NOT-STARTED/ST0004 + cat > intent/st/NOT-STARTED/ST0004/info.md << EOF +--- +id: ST0004 +title: Fourth Steel Thread +status: Not Started +created: 2025-01-05 +author: test_user +intent_version: 2.0.0 +--- +EOF + + run run_intent st list + assert_success + + # Check only in-progress threads are listed + assert_output_contains "ST0001" + assert_output_contains 
"ST0002" + + # Should not show completed or not started + if [[ "$output" == *"ST0003"* ]]; then + fail "Completed thread shown in default view" + fi + if [[ "$output" == *"ST0004"* ]]; then + fail "Not Started thread shown in default view" + fi +} + +@test "st list --status all shows all steel threads" { + project_dir=$(create_test_project "ST List All Test") + cd "$project_dir" + + # Create steel threads with different statuses + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +id: ST0001 +title: First Steel Thread +status: In Progress +created: 2025-01-01 +author: test_user +intent_version: 2.0.0 +--- +EOF + + mkdir -p intent/st/COMPLETED/ST0003 + cat > intent/st/COMPLETED/ST0003/info.md << EOF +--- +id: ST0003 +title: Third Steel Thread +status: Completed +created: 2025-01-03 +completed: 2025-01-04 +author: test_user +intent_version: 2.0.0 +--- +EOF + + run run_intent st list --status all + assert_success + + # Check all threads are listed + assert_output_contains "ST0001" + assert_output_contains "ST0003" +} + +@test "st list --status filters by status" { + project_dir=$(create_test_project "ST Filter Test") + cd "$project_dir" + + # Create threads with different statuses + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +id: ST0001 +title: Active Thread +status: In Progress +--- +EOF + + mkdir -p intent/st/COMPLETED/ST0002 + cat > intent/st/COMPLETED/ST0002/info.md << EOF +--- +id: ST0002 +title: Done Thread +status: Completed +--- +EOF + + # List only In Progress + run run_intent st list --status "In Progress" + assert_success + assert_output_contains "ST0001" + + # Should not contain completed thread + if [[ "$output" == *"ST0002"* ]]; then + fail "Completed thread shown when filtering for In Progress" + fi +} + +@test "st show displays the content of a steel thread" { + project_dir=$(create_test_project "ST Show Test") + cd "$project_dir" + + # Create a steel thread + mkdir -p intent/st/ST0001 + cat > 
intent/st/ST0001/info.md << EOF +--- +id: ST0001 +title: Test Steel Thread +status: In Progress +created: 2025-01-01 +author: test_user +intent_version: 2.0.0 +--- + +# ST0001: Test Steel Thread + +## Description +This is the thread content. + +## Tasks +- [ ] Task 1 +- [ ] Task 2 +EOF + + run run_intent st show ST0001 + assert_success + assert_output_contains "ST0001: Test Steel Thread" + assert_output_contains "This is the thread content" + assert_output_contains "Task 1" +} + +@test "st show works with just the number" { + project_dir=$(create_test_project "ST Show Number Test") + cd "$project_dir" + + mkdir -p intent/st/ST0001 + echo "# ST0001: Test" > intent/st/ST0001/info.md + + run run_intent st show 1 + assert_success + assert_output_contains "ST0001" +} + +@test "st show errors on non-existent steel thread" { + project_dir=$(create_test_project "ST Error Test") + cd "$project_dir" + + run run_intent st show ST9999 + assert_failure + assert_output_contains "not found" +} + +@test "st organize moves threads by status" { + project_dir=$(create_test_project "ST Organize Test") + cd "$project_dir" + + # Create completed thread in wrong location + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +id: ST0001 +title: Should be in COMPLETED +status: Completed +--- +EOF + + # Create active thread in completed location + mkdir -p intent/st/COMPLETED/ST0002 + cat > intent/st/COMPLETED/ST0002/info.md << EOF +--- +id: ST0002 +title: Should be active +status: In Progress +--- +EOF + + # Create the index file first since organize expects it + cat > intent/st/steel_threads.md << EOF +# Steel Threads + +## Active Threads + +## Completed Threads +EOF + + run run_intent st organize --write + assert_success + + # Check threads were moved + assert_directory_exists "intent/st/COMPLETED/ST0001" + assert_directory_exists "intent/st/ST0002" + [ ! -d "intent/st/ST0001" ] || fail "ST0001 still in active directory" + [ ! 
-d "intent/st/COMPLETED/ST0002" ] || fail "ST0002 still in COMPLETED" +} + +@test "st creates steel_threads.md index" { + project_dir=$(create_test_project "ST Index Test") + cd "$project_dir" + + export EDITOR=echo + + # Create a steel thread + run run_intent st new "Index Test Thread" + assert_success + + # Check if index was created/updated + assert_file_exists "intent/st/steel_threads.md" + assert_file_contains "intent/st/steel_threads.md" "Steel Threads" +} + +@test "st repair shows dry-run output" { + project_dir=$(create_test_project "ST Repair Test") + cd "$project_dir" + + # Create a steel thread with malformed frontmatter + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +verblock: "06 Mar 2025:v0.1: Test User - Initial version"\nstp_version: 1.2.1\nstatus: Not Started\ncreated: 20250306\ncompleted: \n +--- +# ST0001: Test Thread + +- **Status**: Completed +- **Created**: 2025-03-06 +- **Completed**: 2025-03-07 +EOF + + run run_intent st repair ST0001 + assert_success + assert_output_contains "Processing: ST0001" + assert_output_contains "Found malformed frontmatter" + assert_output_contains "Would fix malformed frontmatter" + # Note: When frontmatter is malformed, it doesn't have separate status field + # so conflicting status isn't detected until after fixing frontmatter + assert_output_contains "Dry run complete. Use --write to apply changes." 
+} + +@test "st repair --write fixes malformed frontmatter" { + project_dir=$(create_test_project "ST Repair Write Test") + cd "$project_dir" + + # Create a steel thread with malformed frontmatter + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +verblock: "06 Mar 2025:v0.1: Test User - Initial version"\nstp_version: 1.2.1\nstatus: Not Started\ncreated: 20250306\ncompleted: \n +--- +# ST0001: Test Thread + +- **Status**: Completed +- **Created**: 2025-03-06 +- **Completed**: 2025-03-07 +EOF + + # Create the index file first since organize expects it + cat > intent/st/steel_threads.md << EOF +# Steel Threads + +## Active Threads + +## Completed Threads +EOF + + run run_intent st repair ST0001 --write + assert_success + assert_output_contains "Fixed malformed frontmatter" + # The stp_version -> intent_version update happens as part of frontmatter fix + assert_output_contains "Updated frontmatter status to: Completed" + + # Verify the file was moved to COMPLETED directory + assert_file_contains "intent/st/COMPLETED/ST0001/info.md" "intent_version: 2.0.0" + assert_file_contains "intent/st/COMPLETED/ST0001/info.md" "status: Completed" + + # Should not contain stp_version anymore + run grep "stp_version" intent/st/COMPLETED/ST0001/info.md + assert_failure +} + +@test "st repair all threads without specific ID" { + project_dir=$(create_test_project "ST Repair All Test") + cd "$project_dir" + + # Create multiple threads with issues + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +stp_version: 1.2.1 +status: In Progress +--- +# ST0001: First Thread +EOF + + mkdir -p intent/st/ST0002 + cat > intent/st/ST0002/info.md << 'EOF' +--- +intent_version: 2.0.0 +status: WIP +--- +# ST0002: Second Thread + +- **Status**: In Progress +EOF + + run run_intent st repair + assert_success + assert_output_contains "Processing: ST0001" + assert_output_contains "Found legacy stp_version field" + assert_output_contains "Processing: ST0002" + 
assert_output_contains "Found conflicting status:" + assert_output_contains "Dry run complete" +} + +@test "st repair handles missing status field" { + project_dir=$(create_test_project "ST Repair Missing Status Test") + cd "$project_dir" + + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +intent_version: 2.0.0 +created: 20250306 +--- +# ST0001: No Status Thread + +- **Status**: In Progress +EOF + + run run_intent st repair ST0001 + assert_success + assert_output_contains "Missing status field in frontmatter" + assert_output_contains "Would add status field" +} + +@test "st repair validates date formats" { + project_dir=$(create_test_project "ST Repair Date Test") + cd "$project_dir" + + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << 'EOF' +--- +intent_version: 2.0.0 +status: In Progress +created: 2025-03-06 +--- +# ST0001: Bad Date Format +EOF + + run run_intent st repair ST0001 + assert_success + assert_output_contains "Invalid created date format: 2025-03-06" + assert_output_contains "Would fix created date format" +} + +@test "st repair handles non-existent steel thread" { + project_dir=$(create_test_project "ST Repair Not Found Test") + cd "$project_dir" + + run run_intent st repair ST9999 + assert_failure + assert_output_contains "Steel thread not found: ST9999" +} + +@test "st start marks a not-started thread as in progress" { + project_dir=$(create_test_project "ST Start Test") + cd "$project_dir" + + # Create a not-started thread + mkdir -p intent/st/NOT-STARTED/ST0001 + cat > intent/st/NOT-STARTED/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +created: 20250117 +--- +# ST0001: Test Thread + +- **Status**: Not Started +- **Created**: $(date '+%Y-%m-%d') +- **Completed**: +- **Author**: test_user +EOF + + run run_intent st start ST0001 + assert_success + assert_output_contains "Marked steel thread as in progress: ST0001: Test Thread" + + # Check thread was moved to main directory + 
assert_directory_exists "intent/st/ST0001" + assert_file_exists "intent/st/ST0001/info.md" + [ ! -d "intent/st/NOT-STARTED/ST0001" ] || fail "ST0001 still in NOT-STARTED directory" + + # Check status was updated + assert_file_contains "intent/st/ST0001/info.md" "status: WIP" + assert_file_contains "intent/st/ST0001/info.md" "**Status**: WIP" +} + +@test "st start works with just the number" { + project_dir=$(create_test_project "ST Start Number Test") + cd "$project_dir" + + mkdir -p intent/st/NOT-STARTED/ST0042 + cat > intent/st/NOT-STARTED/ST0042/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +--- +# ST0042: Number Test Thread +- **Status**: Not Started +EOF + + run run_intent st start 42 + assert_success + assert_output_contains "ST0042" + assert_directory_exists "intent/st/ST0042" +} + +@test "st start works with various ID formats" { + project_dir=$(create_test_project "ST Start ID Format Test") + cd "$project_dir" + + # Test with leading zeros + mkdir -p intent/st/NOT-STARTED/ST0003 + cat > intent/st/NOT-STARTED/ST0003/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +--- +# ST0003: Test Thread +- **Status**: Not Started +EOF + + run run_intent st start 0003 + assert_success + assert_output_contains "ST0003" + assert_directory_exists "intent/st/ST0003" +} + +@test "st start does nothing if thread is already in progress" { + project_dir=$(create_test_project "ST Start Already Progress Test") + cd "$project_dir" + + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: In Progress +--- +# ST0001: Already Active Thread +- **Status**: In Progress +EOF + + run run_intent st start ST0001 + assert_success + assert_output_contains "Steel thread is already in progress: ST0001: Already Active Thread" + + # Thread should remain in main directory + assert_directory_exists "intent/st/ST0001" +} + +@test "st start moves completed thread to in progress" { + project_dir=$(create_test_project "ST 
Start Completed Test") + cd "$project_dir" + + # Create a completed thread + mkdir -p intent/st/COMPLETED/ST0001 + cat > intent/st/COMPLETED/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Completed +created: 20250115 +completed: 20250116 +--- +# ST0001: Completed Thread + +- **Status**: Completed +- **Created**: 2025-01-15 +- **Completed**: 2025-01-16 +EOF + + run run_intent st start ST0001 + assert_success + assert_output_contains "Marked steel thread as in progress: ST0001: Completed Thread" + + # Check thread was moved to main directory + assert_directory_exists "intent/st/ST0001" + [ ! -d "intent/st/COMPLETED/ST0001" ] || fail "ST0001 still in COMPLETED directory" + + # Check status was updated + assert_file_contains "intent/st/ST0001/info.md" "status: WIP" + assert_file_contains "intent/st/ST0001/info.md" "**Status**: WIP" +} + +@test "st start updates steel_threads.md index" { + project_dir=$(create_test_project "ST Start Index Test") + cd "$project_dir" + + # Use current date consistently + CURRENT_DATE=$(date '+%Y-%m-%d') + + # Create index file + cat > intent/st/steel_threads.md << EOF +# Steel Threads + +This document serves as an index of all steel threads in the project. 
+ +## Index + +| ID | Title | Status | Created | Completed | +| ----------------------- | -------------------- | ------------ | ---------- | ---------- | +| ST0001 | Test Thread | Not Started | $CURRENT_DATE | | +EOF + + mkdir -p intent/st/NOT-STARTED/ST0001 + cat > intent/st/NOT-STARTED/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +created: $(date '+%Y%m%d') +--- +# ST0001: Test Thread +- **Status**: Not Started +- **Created**: $CURRENT_DATE +EOF + + run run_intent st start ST0001 + assert_success + + # Check index was updated + assert_file_contains "intent/st/steel_threads.md" "| ST0001 | Test Thread | WIP | $CURRENT_DATE | |" +} + +@test "st start errors on non-existent steel thread" { + project_dir=$(create_test_project "ST Start Error Test") + cd "$project_dir" + + run run_intent st start ST9999 + assert_failure + assert_output_contains "Steel thread not found: ST9999" +} + +@test "st start requires a steel thread ID" { + project_dir=$(create_test_project "ST Start No ID Test") + cd "$project_dir" + + run run_intent st start + assert_failure + assert_output_contains "Steel thread ID is required" +} + +@test "st start handles thread in main directory" { + project_dir=$(create_test_project "ST Start Main Dir Test") + cd "$project_dir" + + # Create thread already in main directory but not started + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +--- +# ST0001: Main Dir Thread +- **Status**: Not Started +EOF + + run run_intent st start ST0001 + assert_success + assert_output_contains "Marked steel thread as in progress: ST0001: Main Dir Thread" + + # Thread should remain in main directory + assert_directory_exists "intent/st/ST0001" + + # Check status was updated + assert_file_contains "intent/st/ST0001/info.md" "status: WIP" + assert_file_contains "intent/st/ST0001/info.md" "**Status**: WIP" +} + +@test "st list with comma-separated statuses" { + 
project_dir=$(create_test_project "ST List Comma Test") + cd "$project_dir" + + # Create threads with different statuses + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: WIP +--- +# ST0001: WIP Thread +EOF + + mkdir -p intent/st/NOT-STARTED/ST0002 + cat > intent/st/NOT-STARTED/ST0002/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +--- +# ST0002: Not Started Thread +EOF + + mkdir -p intent/st/COMPLETED/ST0003 + cat > intent/st/COMPLETED/ST0003/info.md << EOF +--- +intent_version: 2.0.0 +status: Completed +--- +# ST0003: Completed Thread +EOF + + # Test comma-separated filtering + run run_intent st list --status "wip,completed" + assert_success + assert_output_contains "ST0001" + assert_output_contains "ST0003" + + # Should not contain not started + if [[ "$output" == *"ST0002"* ]]; then + fail "Not Started thread shown when not requested" + fi +} + +@test "st list status ordering" { + project_dir=$(create_test_project "ST List Order Test") + cd "$project_dir" + + # Create threads with different statuses + mkdir -p intent/st/COMPLETED/ST0001 + cat > intent/st/COMPLETED/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Completed +created: 20250101 +--- +# ST0001: Completed Thread +EOF + + mkdir -p intent/st/ST0002 + cat > intent/st/ST0002/info.md << EOF +--- +intent_version: 2.0.0 +status: WIP +created: 20250102 +--- +# ST0002: WIP Thread +EOF + + # Test that ordering is preserved + run run_intent st list --status "completed,wip" + assert_success + + # Extract the IDs in order from output + output_ids=$(echo "$output" | grep -E "^ST[0-9]+" | awk '{print $1}') + first_id=$(echo "$output_ids" | head -1) + second_id=$(echo "$output_ids" | tail -1) + + # Completed should come first as requested + [[ "$first_id" == "ST0001" ]] || fail "Completed thread should be listed first" + [[ "$second_id" == "ST0002" ]] || fail "WIP thread should be listed second" +} + +@test "st list with TBC status" { 
+ project_dir=$(create_test_project "ST List TBC Test") + cd "$project_dir" + + # Create thread with TBC status + mkdir -p intent/st/NOT-STARTED/ST0001 + cat > intent/st/NOT-STARTED/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: Not Started +--- +# ST0001: TBC Thread +EOF + + # Test filtering with TBC + run run_intent st list --status "tbc" + assert_success + assert_output_contains "ST0001" +} + +@test "st list with case-insensitive status" { + project_dir=$(create_test_project "ST List Case Test") + cd "$project_dir" + + # Create WIP thread + mkdir -p intent/st/ST0001 + cat > intent/st/ST0001/info.md << EOF +--- +intent_version: 2.0.0 +status: WIP +--- +# ST0001: WIP Thread +EOF + + # Test case-insensitive filtering + run run_intent st list --status "WIP" + assert_success + assert_output_contains "ST0001" + + run run_intent st list --status "wip" + assert_success + assert_output_contains "ST0001" + + run run_intent st list --status "Wip" + assert_success + assert_output_contains "ST0001" +} \ No newline at end of file diff --git a/tests/unit/task_commands.bats b/tests/unit/task_commands.bats new file mode 100644 index 0000000..6ac8d8b --- /dev/null +++ b/tests/unit/task_commands.bats @@ -0,0 +1,206 @@ +#!/usr/bin/env bats +# Tests for intent task commands (v2.0.0) + +load "../lib/test_helper.bash" + +@test "task requires a command" { + project_dir=$(create_test_project "Task Test") + cd "$project_dir" + + run run_intent task + assert_success # Shows usage + assert_output_contains "Usage: intent task" +} + +@test "task shows help with --help" { + project_dir=$(create_test_project "Task Help Test") + cd "$project_dir" + + run run_intent task --help + assert_success + assert_output_contains "Usage: intent task" + assert_output_contains "create" + assert_output_contains "list" + assert_output_contains "sync" +} + +@test "task create creates a new backlog task" { + project_dir=$(create_test_project "Task Create Test") + cd "$project_dir" + + # Create a 
steel thread + mkdir -p intent/st/ST0014 + cat > intent/st/ST0014/info.md << EOF +--- +id: ST0014 +title: Test Steel Thread +status: In Progress +created: 2025-03-20 +author: test_user +intent_version: 2.0.0 +--- + +# ST0014: Test Steel Thread + +## Tasks +- [ ] First task +- [ ] Second task +EOF + + # Mock backlog command + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +if [[ "$1" == "task" && "$2" == "create" ]]; then + echo "Created task task-001" + echo "File: backlog/tasks/task-001.md" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent task create ST0014 "Test task description" + assert_success + assert_output_contains "Creating task: ST0014 - Test task description" + assert_output_contains "Task created successfully" +} + +@test "task create validates steel thread ID format" { + project_dir=$(create_test_project "Task Validate Test") + cd "$project_dir" + + run run_intent task create INVALID "Test task" + assert_failure + assert_output_contains "Invalid steel thread ID format" +} + +@test "task create requires both ID and title" { + project_dir=$(create_test_project "Task Args Test") + cd "$project_dir" + + run run_intent task create ST0014 + assert_failure + assert_output_contains "Both steel thread ID and title are required" +} + +@test "task list shows tasks for a steel thread" { + project_dir=$(create_test_project "Task List Test") + cd "$project_dir" + + # Create test task files + mkdir -p backlog/tasks + cat > "backlog/tasks/task-001 - ST0014-First-task.md" << EOF +--- +id: task-001 +title: ST0014 - First task +status: Done +assignee: [] +created_date: '2025-07-08' +labels: [] +dependencies: [] +--- + +## Description +First task description +EOF + + cat > "backlog/tasks/task-002 - ST0014-Second-task.md" << EOF +--- +id: task-002 +title: ST0014 - Second task +status: To Do +assignee: [] +created_date: '2025-07-08' +labels: [] +dependencies: [] +--- + +## Description +Second task description +EOF + 
+ # Create a task for different ST + cat > "backlog/tasks/task-003 - ST0015-Other-task.md" << EOF +--- +id: task-003 +title: ST0015 - Other task +status: To Do +--- +EOF + + run run_intent task list ST0014 + assert_success + assert_output_contains "Tasks for ST0014:" + assert_output_contains "task-001" + assert_output_contains "task-002" + + # Should not show task from other ST + if [[ "$output" == *"ST0015"* ]]; then + fail "Task from different steel thread shown" + fi +} + +@test "task list handles no tasks found" { + project_dir=$(create_test_project "Task Empty Test") + cd "$project_dir" + + mkdir -p backlog/tasks + + run run_intent task list ST0099 + assert_success + assert_output_contains "Tasks for ST0099:" +} + +@test "task sync updates steel thread status based on tasks" { + project_dir=$(create_test_project "Task Sync Test") + cd "$project_dir" + + # Create steel thread + mkdir -p intent/st/ST0014 + cat > intent/st/ST0014/info.md << EOF +--- +id: ST0014 +title: Test Steel Thread +status: In Progress +--- +EOF + + # Create completed tasks + mkdir -p backlog/tasks + cat > "backlog/tasks/task-001 - ST0014-Task.md" << EOF +--- +id: task-001 +title: ST0014 - Task +status: Done +--- +EOF + + # Mock backlog command for task view + mkdir -p bin + cat > bin/backlog << 'EOF' +#!/bin/bash +if [[ "$1" == "task" && "$2" == "view" && "$3" == "task-001" ]]; then + echo "Status: done" + exit 0 +fi +exit 1 +EOF + chmod +x bin/backlog + export PATH="$PWD/bin:$PATH" + + run run_intent task sync ST0014 + assert_success + assert_output_contains "Task Summary:" + assert_output_contains "Total: 0" +} + +@test "task sync validates steel thread exists" { + project_dir=$(create_test_project "Task Sync Validate Test") + cd "$project_dir" + + run run_intent task sync ST9999 + assert_success + assert_output_contains "Syncing status for ST9999" +} \ No newline at end of file