From 097a6a93f836bdc4087e73fff676ec2816074427 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Mon, 23 Jun 2025 10:13:04 +0800 Subject: [PATCH 01/15] fix: unset TMUX env in VSCode and Zed terminals Review notes: - This change correctly addresses the issue of `fzf --tmux` hanging in VSCode and Zed terminals by unsetting the `TMUX` environment variable. - The conditional check for `$TERM_PROGRAM` is appropriate for identifying these specific environments. - This fix improves the user experience when using `fzf` within these integrated terminals. --- nix/hm/tmux.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nix/hm/tmux.nix b/nix/hm/tmux.nix index 616b25c6..fbbea2aa 100644 --- a/nix/hm/tmux.nix +++ b/nix/hm/tmux.nix @@ -320,4 +320,11 @@ in # ========== End UI ''; }; + programs.fish.interactiveShellInit = '' + # unset TMUX env if we are in vscode/zed terminal + # otherwise, the fzf --tmux will hang. + if test "$TERM_PROGRAM" = "vscode" -o "$TERM_PROGRAM" = "zed" + set -e TMUX + end + ''; } From 87b380d8a18b7d88cd804246152a2b2e1ff6338d Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Mon, 23 Jun 2025 12:02:30 +0800 Subject: [PATCH 02/15] flake.lock: Update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'jj-repo': 'git+ssh://git@github.com/pze/jj.git?ref=refs/heads/main&rev=0a9ab49dc5e59d9d6032477d0f4e916a24c1ca21&shallow=1' (2025-06-13) → 'git+ssh://git@github.com/pze/jj.git?ref=refs/heads/main&rev=bc83eb0a4837bdec89792967dd28674141f4a364&shallow=1' (2025-06-23) --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 31975a39..3532df9d 100644 --- a/flake.lock +++ b/flake.lock @@ -464,10 +464,10 @@ "rust-overlay": "rust-overlay" }, "locked": { - "lastModified": 1749774239, - "narHash": "sha256-MBktcY0rpOBUc6DNQ20OKFBzAgaM3Ix+O39ckxPEmIw=", + "lastModified": 1750638421, + "narHash": "sha256-AvSzcirthitS9k4bv31HUMa8mGZLJd1M3bNgXV48YeE=", "ref": "refs/heads/main", - "rev": "0a9ab49dc5e59d9d6032477d0f4e916a24c1ca21", + "rev": "bc83eb0a4837bdec89792967dd28674141f4a364", "shallow": true, "type": "git", "url": "ssh://git@github.com/pze/jj.git" From 6be15479c5e8ef123eb4231b4160730bd73a6652 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Tue, 24 Jun 2025 17:17:46 +0800 Subject: [PATCH 03/15] feat: improve jj-fork description handling Allow `jj-fork` to automatically use the commit message of a specified revision as the description if no description is explicitly provided. This enhances usability by reducing the need for manual input when forking existing commits. Review notes: - The logic for fetching the commit message from `jj log` seems robust. - Error handling for cases where the commit message cannot be retrieved is in place. - The `tig` configuration changes are minor and seem to improve display without introducing issues. - Consider adding a test case for the `jj-fork` script to ensure the new description fallback works as expected. 
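A minimal sketch of such a test (not part of this patch; it assumes `jj` and `jj-fork` are available on `$PATH`, and the file name and commit message below are purely illustrative):

```fish
# Hypothetical smoke test for the description fallback; a sketch, not shipped code.
function test-jj-fork-description-fallback
    set -l scratch (mktemp -d)
    pushd $scratch
    # Set up a throwaway repo whose working-copy commit has a known description.
    jj git init . >/dev/null
    echo content > demo.txt
    jj describe -m "feat: demo commit"
    # With -r but no -d, jj-fork should reuse the revision's commit message.
    if jj-fork -r @
        echo "fallback test passed"
    else
        echo "fallback test FAILED"
    end
    popd
    rm -rf $scratch
end
```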
---
 conf/fish/funcs/jj-fork.fish | 29 +++++++++++++++++++++++++----
 conf/tig/config | 4 ++--
 conf/tig/vim.tigrc | 1 +
 3 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/conf/fish/funcs/jj-fork.fish b/conf/fish/funcs/jj-fork.fish
index 5d2f1a17..49801ffd 100644
--- a/conf/fish/funcs/jj-fork.fish
+++ b/conf/fish/funcs/jj-fork.fish
@@ -11,7 +11,7 @@ function jj-fork --description "Fork from a bookmark or revision"
     set -l help_string "Usage: jj-fork -d <description> [-b <bookmark> | -r <revision>] [--no-new]
 -d, --description Description for the new revision
 -b, --bookmark Bookmark to fork from (will fetch from origin)
- -r, --revision Revision to fork from (local revision)
+ -r, --revision Revision to fork from (local revision, implies --no-new by default)
 --no-new Create bookmark on existing revision without creating new commit
 -h, --help Show this help"
@@ -20,10 +20,31 @@
     if set -q _flag_help
         echo $help_string
         return 0
     end

+    # If revision is provided, set --no-new as default
+    if set -q _flag_revision
+        if not set -q _flag_no_new
+            set _flag_no_new true
+        end
+    end
+
     if not set -q _flag_description
-        echo "Error: Description is required"
-        echo $help_string
-        return 1
+        # If revision is provided, try to get commit message as description
+        if set -q _flag_revision
+            echo "No description provided, getting commit message from revision $_flag_revision..."
+            set -l commit_message (jj --ignore-working-copy log --quiet -r $_flag_revision -T 'description' 2>/dev/null)
+            if test $status -eq 0 -a -n "$commit_message"
+                set _flag_description $commit_message
+                echo "Using commit message as description: $commit_message"
+            else
+                echo "Error: Could not get commit message from revision $_flag_revision"
+                echo $help_string
+                return 1
+            end
+        else
+            echo "Error: Description is required"
+            echo $help_string
+            return 1
+        end
     end

     # Ensure either bookmark or revision is provided, but not both

diff --git a/conf/tig/config b/conf/tig/config
index 984f95bc..e91a87f9 100644
--- a/conf/tig/config
+++ b/conf/tig/config
@@ -1,11 +1,11 @@
 # https://raw.githubusercontent.com/jonas/tig/master/tigrc
 set vertical-split = auto
 set split-view-width = 80%
-# vim will crash if not support 
+# vim will crash if not supported
 set editor-line-number = no
 set tab-size = 2
 set git-colors = "branch.current=main-head grep.filename=grep.file"
-set main-view = line-number:no,interval=2 id:yes date:default,format="%Y-%m-%d" author:abbreviated commit-title:yes,graph,refs,overflow=no
+set main-view = line-number:no,interval=2 id:no date:default,format="%Y-%m-%d" author:abbreviated commit-title:yes,graph,refs:no,overflow=yes

 set git-colors = no

diff --git a/conf/tig/vim.tigrc b/conf/tig/vim.tigrc
index bbf03a5b..549d4838 100644
--- a/conf/tig/vim.tigrc
+++ b/conf/tig/vim.tigrc
@@ -79,6 +79,7 @@ bind generic oi :toggle id
 bind generic ot :toggle commit-title-overflow
 bind generic oF :toggle file-filter
 bind generic or :toggle commit-title-refs
+bind generic oR :toggle refs

 bind generic @ none
 bind generic @j :/^@@
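For reviewers, a quick before/after sketch of the fallback; the revision ID `wxyz` and the commit message shown are hypothetical, while the printed messages come from the script's own `echo` calls:

```fish
# Before this patch, forking from a revision without a description failed:
#   $ jj-fork -r wxyz
#   Error: Description is required
# With this patch, the description is pulled from the revision itself:
jj-fork -r wxyz
# No description provided, getting commit message from revision wxyz...
# Using commit message as description: fix: handle empty input
```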
+ "..@" ]; }; ui = { From 989cf49a6bce053a2c661b39ef99f49c85ec7ebe Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Wed, 25 Jun 2025 13:08:32 +0800 Subject: [PATCH 05/15] fix(jj): update immutable_heads to include older commits --- nix/hm/jj.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/hm/jj.nix b/nix/hm/jj.nix index 4d5be5ad..1c5de657 100644 --- a/nix/hm/jj.nix +++ b/nix/hm/jj.nix @@ -822,7 +822,7 @@ in revset-aliases = { "m-m" = "description('private: megamerge')"; - # Override immutable_heads to include pub/sandbox bookmark + # Override immutable_heads to include pub/sandbox bookmark and commits older than 1 day "immutable_heads()" = "builtin_immutable_heads() | present(pub/sandbox)"; "new_visible_commits(op)" = From 00ffefa81788770cc8f91a10f6660d420fee064b Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Wed, 25 Jun 2025 22:52:04 +0800 Subject: [PATCH 06/15] docs: update gen-prompt.md content Review notes: - The diff indicates a near-complete rewrite of the `gen-prompt.md` file. - Ensure the new content accurately reflects the current prompt generation logic and requirements. - Verify that all necessary information from the previous version has been either incorporated or intentionally removed. - Consider if this extensive change warrants a more detailed commit message body explaining the rationale for the rewrite. --- conf/llm/aichat/roles/gen-prompt.md | 222 ++-------------------------- 1 file changed, 11 insertions(+), 211 deletions(-) diff --git a/conf/llm/aichat/roles/gen-prompt.md b/conf/llm/aichat/roles/gen-prompt.md index 91398245..563992c9 100644 --- a/conf/llm/aichat/roles/gen-prompt.md +++ b/conf/llm/aichat/roles/gen-prompt.md @@ -215,238 +215,38 @@ Please write an agent task plan in markdown file suffix with `-task-plan.md` in - **LIBRARY EVALUATION**: Research GitHub repositories for popularity, maintenance status, and community adoption **Expected Task Plan Structure:** -Use this markdown template for your task plan: +Use the following **top-level sections** in the given order. Items prefixed with (!) are mandatory and MUST appear exactly as written. Sub-headers (if any) should follow the same naming. -``` +```markdown # Task Plan: [Project Name] -## Specific Implementation Requirements -[**MANDATORY SECTION** - Identify and document all user-provided information that is required for task implementation. Include any URLs, names, values, commands, paths, configurations, version numbers, or other details necessary to complete the task successfully.] - -## Codebase Analysis -- **CRITICAL: Working Directory Context**: [**MANDATORY FIRST SECTION** - Establish correct directory context before any other analysis] - - **Current Working Directory**: [Use `pwd` to document the exact current working directory path] - - **Project Root Location**: [Identify the actual project root directory and its relationship to current working directory] - - **Directory Relationship**: [Clearly document the relationship - e.g., "Current working directory `/project/monorepo` is the project root, but Elixir work must be done in `/project/monorepo/elixir-apps/` subdirectory"] - - **Target Implementation Context**: [**CRITICAL** - Specify the exact directory where work should be performed. 
If current working directory is project root but task involves specific technology (e.g., Elixir, Node.js), identify the correct subdirectory and specify navigation commands needed] - - **Path Resolution Strategy**: [Document whether file paths in the task plan should be relative to current working directory, project root, or absolute paths] - - **Navigation Requirements**: [**MANDATORY** - If the task requires working in a different directory than current, specify the exact navigation commands needed. For example, if pwd is `/project/monorepo` but Elixir work needs to be done, specify `cd elixir-apps` before running commands like `mix test`] -- **Project Type & Architecture**: [Identify if this is a monorepo, umbrella project (e.g., Elixir umbrella), multi-language workspace, or single application. Document the overall architectural approach and organization strategy] -- **Current Project Structure**: [Provide detailed directory tree analysis including:] - - **Root Level Structure**: [Document all top-level directories and their purposes, clearly distinguishing between project root and current working directory structure] - - **Sub-projects/Applications**: [For umbrella projects or monorepos, list all individual applications/services with their locations (e.g., `apps/web_app`, `apps/api_service`), specifying paths relative to both project root AND current working directory] - - **Shared Resources**: [Identify shared libraries, configurations, or utilities and their locations relative to both project root and current working directory] - - **Key Files**: [Document important configuration files at root and sub-project levels, with paths clearly specified relative to current working directory] -- **CRITICAL: Data Flow Analysis**: [**MANDATORY** section to prevent data flow anti-patterns] - - **Anti-Pattern Audit**: [Search for `$parent`, `this.$parent`, `useParent()`, DOM traversal for state access] - - **Component Interfaces**: [Document component props/parameters and events/callbacks] - - **State Management**: [Identify existing patterns and ensure proper integration without bypassing them] -- **CRITICAL: Dependency Architecture Analysis**: [**MANDATORY** section to prevent circular dependencies and architectural violations] - - **Dependency Graph Mapping**: [Create a visual or hierarchical representation of inter-app dependencies. 
Example: `snowflake` → `snowbt`, `snowspider` (meaning snowflake depends on the other two)] - - **Architectural Layers**: [Identify distinct layers like: Core/Infrastructure → Business Logic → Application/UI, with clear dependency direction rules] - - **Dependency Rules & Constraints**: [Document existing dependency patterns and rules (e.g., "apps in `core/` cannot depend on apps in `web/`")] - - **Forbidden Dependency Patterns**: [List dependency combinations that would create circular dependencies or violate architecture] - - **Module Ownership Matrix**: [Document which app owns which types of modules/functionality to guide new feature placement] -- **CRITICAL: Architectural Layer Analysis & SRP Validation**: [**MANDATORY** section to enforce Single Responsibility Principle and prevent layer boundary violations] - - **Layer Identification & Mapping**: [Identify and document the current architectural layers in the codebase] - - **Core/Domain Layer**: [Pure business logic, data structures, domain rules - NO infrastructure concerns] - - **Service/Application Layer**: [Orchestration, caching strategies, transaction management, application workflows] - - **Infrastructure Layer**: [External system integrations, persistence, networking, technical cross-cutting concerns] - - **Presentation Layer**: [UI logic, request/response handling, user interaction concerns] - - **Current Layer Responsibility Audit**: [Document what each layer currently handles and identify any SRP violations] - - **Layer Purity Assessment**: [Check if layers contain only appropriate concerns for their level] - - **Mixed Concern Identification**: [Identify modules that violate SRP by mixing concerns from different layers] - - **Cross-Cutting Concern Implementation**: [Document how caching, logging, metrics, security are currently implemented and where they belong] - - **SRP Compliance Matrix**: [For each major module, document its primary responsibility and any mixed concerns] - - **Core Module Responsibilities**: [Ensure core modules have single, clear responsibilities (e.g., tick data access should ONLY handle data retrieval, not caching)] - - **Service Layer Boundaries**: [Identify where orchestration, caching, and workflow logic should be placed] - - **Infrastructure Isolation**: [Ensure infrastructure concerns are properly isolated from business logic] - - **Architectural Violation Prevention Rules**: [Establish clear rules to prevent common SRP violations] - - **Forbidden Concern Mixing**: [List specific combinations that should never be mixed (e.g., data access + caching logic)] - - **Layer Communication Protocols**: [Define how layers should interact without violating boundaries] - - **Responsibility Assignment Guidelines**: [Rules for determining which layer a new feature or concern belongs to] -- **Existing Technology Stack**: [For each sub-project or the main project, identify:] - - **Languages & Frameworks**: [Specific versions and frameworks in use] - - **Build Tools**: [Mix, npm/pnpm, Poetry, Cargo, etc. and their configurations] - - **Runtime Environments**: [Elixir/OTP versions, Node.js versions, Python versions, etc.] - - **Databases & External Services**: [Databases, message queues, external APIs in use] -- **Configuration Files**: [Document all configuration files and their relationships:] - - **Root Configurations**: [Workspace-level configs like root `mix.exs`, `package.json`, etc.] 
- - **Sub-project Configurations**: [Individual app configs and how they relate to root configs] - - **Environment Configurations**: [Development, testing, production configs] - - **Dependency Management**: [How dependencies are managed across the project structure] -- **Development Workflow**: [Identify current development practices:] - - **Build Process**: [How to build individual apps vs entire project] - - **Testing Setup**: [Testing strategies for individual apps and integration testing] - - **Development Commands**: [Common commands for development, testing, deployment] - - **Deployment Processes**: [How different parts of the project are deployed] -- **Integration Points**: [Note how new work should integrate with existing codebase:] - - **Target Location**: [Specify exactly where new code/features should be placed in the project structure] - - **Cross-project Dependencies**: [How different parts of the project interact] - - **Shared Interfaces**: [APIs, protocols, or contracts between different parts] - - **Conflict Avoidance**: [Potential conflicts with existing code and how to avoid them] - - **CRITICAL: Dependency Impact Analysis**: [**MANDATORY** before recommending any new module or feature placement] - - **New Module Placement Validation**: [Verify that proposed new modules won't create circular dependencies by checking against the dependency graph] - - **Dependency Direction Compliance**: [Ensure new features follow existing dependency direction rules (e.g., don't add modules to lower-level apps that higher-level apps would need to call)] - - **Alternative Placement Options**: [If the obvious placement would violate dependency rules, provide alternative locations that maintain architectural integrity] - - **Refactoring Requirements**: [If the desired functionality requires dependency restructuring, clearly identify what refactoring would be needed and the complexity involved] - - **CRITICAL: Data Flow Integration**: [Ensure new components follow proper data flow without parent access anti-patterns] +## Specific Implementation Requirements (!) + +## Codebase Analysis (!) +### (!) 
Working Directory Context ## Project Overview -- **Objective**: [Clear, measurable goal that builds upon existing project foundation and follows proper data flow patterns] -- **Context**: [Background and rationale, considering current project state and data flow architecture] -- **Success Criteria**: [Specific outcomes that align with existing project goals, architecture, and proper data flow principles] ## Requirements Analysis -- **Functional Requirements**: [What the system must do, with explicit data flow requirements] -- **Non-functional Requirements**: [Performance, security, usability, maintainability of data flow] -- **Constraints**: [Technical, time, resource limitations, and data flow pattern constraints] -- **Dependencies**: [External systems, APIs, services, and their data integration patterns] ## Implementation Plan -### Phase 1: [Phase Name] -- **Duration**: [Estimated time with justification] -- **Deliverables**: [Specific outputs with exact file paths and locations] -- **CRITICAL: Working Directory Context Validation**: [**MANDATORY** - Verify correct directory context for this phase] - - **Target Directory**: [Specify exactly where in the project structure this phase's work will be done, with clear relationship to current working directory] - - **Directory Navigation**: [**CRITICAL** - If work needs to be done in a different directory than current working directory, specify the exact navigation commands needed. For example, if pwd is project root `/project/monorepo` but task involves Elixir work, specify `cd elixir-apps` before running any Elixir commands like `mix test`, `mix deps.get`, etc.] - - **File Path Strategy**: [Specify whether file paths in this phase are relative to current working directory, target directory, or project root] - - **Context Validation Commands**: [Include commands to verify correct directory context before starting work (e.g., `pwd` to confirm current location, `ls -la` to verify expected files exist, and navigation commands like `cd elixir-apps && pwd` to confirm correct target directory)] -- **Specific Implementation Steps**: [Include all task-required details provided by the user - URLs, names, values, commands, etc.] 
-- **CRITICAL: Data Flow Validation**: [Ensure no parent access anti-patterns and explicit parameter passing] -- **CRITICAL: Dependency Validation**: [**MANDATORY** for each phase that adds new modules or features] - - **Dependency Graph Check**: [Verify that all proposed changes comply with existing dependency architecture] - - **Circular Dependency Prevention**: [Confirm no circular dependencies will be introduced] - - **Module Placement Justification**: [Explain why the chosen location respects dependency rules and doesn't violate architectural principles] -- **CRITICAL: SRP & Architectural Layer Validation**: [**MANDATORY** for each phase to prevent Single Responsibility Principle violations] - - **Layer Boundary Compliance**: [Verify that all new modules are placed in the correct architectural layer and contain only appropriate concerns] - - **Single Responsibility Verification**: [Confirm each new module has one clear, well-defined responsibility without mixing concerns from different layers] - - **Cross-Cutting Concern Placement**: [Ensure cross-cutting concerns (caching, logging, metrics) are implemented at appropriate architectural boundaries, not mixed into business logic] - - **Concern Separation Validation**: [Verify that infrastructure concerns are not mixed with business logic, and that pure functions remain side-effect free] - - **Layer Communication Validation**: [Ensure proposed inter-layer communication follows established patterns and doesn't violate architectural boundaries] -- **CRITICAL: API Design & Function Signature Validation**: [**MANDATORY** for each phase to ensure clear, maintainable function contracts] - - **Clear Function Signatures**: [Verify that all function signatures clearly indicate required data without forcing callers to understand internal implementation] - - **Minimal Parameter Dependencies**: [Ensure functions accept only the minimal data they actually need - extract specific values from complex objects at call sites] - - **Primitive Argument Preference**: [Prefer primitive arguments (strings, numbers, booleans) over complex objects when possible] - - **Complex Object Passing Validation**: [When objects must be passed, document exactly which properties are used and consider using TypeScript interfaces or JSDoc] - - **Anti-Pattern Prevention**: [Prevent passing entire context objects, component instances (`this`), or stores when only specific values are needed] - - **API Contract Documentation**: [Ensure each parameter has a clear, single purpose evident from parameter name and documentation] -- **Tasks**: - 1. [**CRITICAL: Documentation Research** - Use MCP Context7 to retrieve latest official documentation for all libraries/frameworks, then use web search and GitHub tools for additional evaluation] - 2. [Detailed task with clear acceptance criteria and specific file paths] - 3. 
[Next task with dependencies clearly noted and target locations specified] -- **File Placement Strategy**: [**CRITICAL** - Specify exactly where new files will be created with explicit directory context] - - **File Creation Context**: [Specify whether files are created relative to current working directory, project root, or target directory] - - **Absolute vs Relative Paths**: [Document whether to use absolute paths or relative paths, and relative to which directory] - - **Directory Creation Requirements**: [If new directories need to be created, specify the exact commands and paths relative to current working directory] - - **Path Validation**: [Include commands to verify correct file placement (e.g., `ls -la target_directory/` to confirm files are in expected location)] - - **Dependency Compliance Verification**: [Ensure file placement respects dependency architecture and doesn't violate project structure rules] -- **Risks**: [Potential blockers and mitigation strategies, including directory structure conflicts, dependency violations, and data flow anti-patterns] - -### Phase N: [Continue for all phases] +### Phase 1: … +### Phase N: … ## Technical Architecture -- **Technology Stack**: [Modern, justified technology choices that integrate with existing project setup, with latest stable versions researched via Context7, web, and GitHub] -- **Documentation-Driven Design**: [**CRITICAL** - Use MCP Context7 to access official documentation for architectural decisions, ensuring all design choices follow latest official guidelines and best practices] -- **System Design**: [High-level architecture using contemporary design patterns that complement existing codebase structure and follow proper data flow principles] -- **Data Flow Architecture**: [Design explicit component interfaces with props down, events up pattern] -- **Integration Strategy**: [How new components will integrate with existing systems, APIs, and workflows while maintaining proper data flow, guided by official documentation retrieved via Context7] -- **Data Flow**: [Key data interactions and flow with modern protocols, respecting existing data patterns and preventing anti-patterns] -- **Security Considerations**: [Current security best practices, auth, encryption, compliance that work with existing security model, validated against official security documentation via Context7] -- **Modern Practices**: [Current industry standards, DevOps practices, and architectural patterns compatible with existing development workflow, informed by latest official documentation] -- **Library Selection**: [Researched via Context7 for official documentation, GitHub repositories with stars, maintenance status, community adoption metrics, and compatibility with existing dependencies] ## Testing Strategy -- **Unit Testing**: [Coverage approach and tools, including data flow testing] -- **Integration Testing**: [Service interaction testing, including component communication testing] -- **User Acceptance Testing**: [Validation criteria including proper data flow behavior] -- **Data Flow Testing**: [Specific tests to verify proper data passing and prevent anti-patterns] ## Deployment Plan -- **Environment Setup**: [Dev, staging, production configs] -- **Deployment Process**: [Step-by-step deployment and rollback] -- **Monitoring**: [Health checks, alerts, and observability including data flow monitoring] ## Timeline & Milestones -- **Key Dates**: [Major deliverable dates] -- **Dependencies**: [Critical path items including data flow refactoring if 
needed] -- **Buffer Time**: [Risk mitigation time allocation] ## Resource Requirements -- **Team**: [Roles, responsibilities, and skill requirements including data flow expertise] -- **Tools**: [Development and deployment tools needed] -- **Infrastructure**: [Hosting, services, and hardware needs] ## Review Checkpoints -- **Phase Gates**: [Go/no-go decision points including data flow pattern reviews] -- **Stakeholder Reviews**: [When and who needs to review including architecture reviews] -- **Quality Gates**: [Code review, testing, security checkpoints including data flow pattern validation] - -**Quality Requirements:** -- **CODEBASE-FIRST APPROACH**: Always start by analyzing the existing project structure, technology stack, and development patterns before making any recommendations -- **DATA-FLOW-FIRST ARCHITECTURE**: **CRITICAL** requirement to prevent data flow anti-patterns - - **No Parent Access**: Prevent `$parent`, `this.$parent`, `useParent()`, DOM traversal for state access - - **Explicit Parameters**: All data must be passed as explicit parameters, no implicit dependencies - - **Props Down, Events Up**: Enforce proper data flow direction with clear component interfaces -- **DEPENDENCY-AWARE ARCHITECTURE**: **CRITICAL** requirement to prevent architectural violations - - **Mandatory Dependency Analysis**: Every task plan MUST include comprehensive dependency mapping before recommending any new code placement - - **Circular Dependency Prevention**: Verify that no proposed changes will create circular dependencies - - **Architectural Integrity**: Ensure all recommendations respect existing dependency direction and layering principles - - **Module Placement Validation**: Validate that new modules are placed in apps that can legitimately own that functionality without violating dependency rules -- **SINGLE RESPONSIBILITY PRINCIPLE (SRP) ENFORCEMENT**: **CRITICAL** requirement to prevent mixing concerns from different architectural layers - - **Mandatory Layer Analysis**: Every task plan MUST include comprehensive architectural layer analysis to understand current layer boundaries and responsibilities - - **SRP Violation Prevention**: Prevent mixing concerns that belong at different architectural layers (e.g., never add cache logic to core data access modules) - - **Layer Boundary Respect**: Ensure all new code is placed in the appropriate architectural layer with only concerns appropriate to that layer - - **Cross-Cutting Concern Isolation**: Implement cross-cutting concerns (caching, logging, metrics, security) at appropriate architectural boundaries, not mixed into business logic - - **Pure Responsibility Assignment**: Each module should have one clear, well-defined responsibility without mixing infrastructure and business concerns - - **Architectural Layer Compliance**: Verify that Core/Domain layers contain only pure business logic, Service layers handle orchestration/caching, Infrastructure layers handle external concerns, and Presentation layers handle UI logic -- **API DESIGN & FUNCTION SIGNATURE ENFORCEMENT**: **CRITICAL** requirement to ensure clear, maintainable function contracts - - **Mandatory API Contract Analysis**: Every task plan MUST include analysis of function signatures to ensure they follow clear contract principles - - **Unclear Parameter Prevention**: Prevent passing entire objects (context, stores, component instances) when only specific values are needed - - **Self-Documenting Signatures**: Ensure function signatures clearly indicate what data is required 
without forcing callers to understand internal implementation - - **Minimal Dependency Principle**: Functions should accept only the minimal data they actually need - extract specific values at call sites - - **Primitive Argument Preference**: Prefer primitive arguments over complex objects when possible for better testability and clarity - - **Complex Object Documentation**: When objects must be passed, require documentation of exactly which properties are used and why - - **Anti-Pattern Detection**: Identify and prevent common API design anti-patterns like context object passing and unclear parameter purposes -- **WORKING DIRECTORY AWARENESS**: **CRITICAL** requirement to prevent directory context confusion and ensure correct file placement - - **MANDATORY WORKING DIRECTORY ANALYSIS**: **FIRST STEP** in every task plan - establish correct directory context before any other analysis - - **Current Directory Identification**: Use `pwd` to identify exact current working directory and document it clearly - - **Project Root Discovery**: Use directory traversal and file markers to identify actual project root location - - **Directory Relationship Mapping**: Document the relationship between current working directory and project root (e.g., subdirectory, sibling, parent) - - **Context Validation**: Verify whether the task should be implemented in current working directory or requires navigation to different location - - **Directory Structure Analysis**: Use tools like `tree`, `ls`, or `find` to understand the complete project structure FROM THE CURRENT WORKING DIRECTORY PERSPECTIVE - - **Project Root Identification**: Identify the actual project root and understand the relationship between working directory and project structure - - **Target Location Specification**: **CRITICAL** - Always specify exact file paths with explicit context about which directory they are relative to - - **Path Context Documentation**: Clearly state whether paths are relative to current working directory, project root, or target directory - - **Navigation Requirements**: If task requires working in different directory, specify exact navigation commands needed - - **File Creation Context**: Document exactly where files will be created and how to verify correct placement - - **Multi-project Context**: For umbrella projects or monorepos, clearly identify which sub-project or application the task applies to and its relationship to current working directory - - **Directory Context Validation**: Include validation steps to ensure agents are working in correct directory context throughout task execution - - **Context Verification Commands**: Include `pwd`, `ls -la`, and other commands to verify correct directory context - - **Path Resolution Validation**: Verify that file paths resolve correctly from the intended directory context - - **Target Directory Confirmation**: Confirm that target directories exist and are accessible from current working directory -- Each task must include specific acceptance criteria -- Provide effort estimates in hours/days with justification -- Identify all dependencies between tasks and phases -- Include comprehensive risk assessment with mitigation strategies -- Create clear decision points for stakeholder involvement -- Ensure all technical decisions are justified with alternatives and compatibility with existing setup -- Make the plan actionable by a development team working within the existing codebase -- **SCALE APPROPRIATELY**: Match plan complexity to actual project scope - simple tasks 
should have simple plans -- **AVOID OVER-ENGINEERING**: Don't suggest enterprise-grade solutions for basic requirements -- **RESPECT EXISTING INFRASTRUCTURE**: Build upon existing tools, configurations, and patterns rather than replacing them -- **MODERN TECHNICAL RESEARCH**: Include current best practices, latest stable versions, and contemporary architectural patterns that are compatible with existing setup -- **CURRENT TECH STACK**: Recommend modern, well-supported technologies and frameworks that integrate well with existing project infrastructure -- **ARCHITECTURE DESIGN**: Provide detailed system architecture using current design patterns that complement existing codebase structure and enforce proper data flow -- **RESEARCH-DRIVEN DECISIONS**: **MANDATORY** - Use MCP Context7 to retrieve official documentation first, then validate technology choices with web search and GitHub tools to find optimal libraries that work with existing dependencies -- **EVIDENCE-BASED RECOMMENDATIONS**: Include GitHub stars, maintenance activity, community feedback, and compatibility analysis in technology selection - -**Output Format:** -- Use markdown format -- Include all sections from the template above -- Be specific and detailed in all estimates and descriptions -- Focus on actionability and reviewability -- Emphasize proper data flow patterns and anti-pattern prevention +``` + +_Reference_: Sub-bullet guidance and detailed explanations from the previous version remain authoritative; they are omitted here purely to save tokens. ## Prompt Writing Guidelines From 464d7665ff89be2be9e44e49df6d8a133a8d25f3 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Wed, 25 Jun 2025 22:55:28 +0800 Subject: [PATCH 07/15] docs: update gen-prompt.md content Review notes: - The diff indicates a significant rewrite of the gen-prompt.md file. - Ensure all critical information from the previous version is retained or intentionally removed. - Verify that the new content is clear, concise, and accurately reflects the current state of the AI chat roles. - Check for any broken links or outdated references introduced by the changes. --- conf/llm/aichat/roles/gen-prompt.md | 357 ++++++++++------------------ 1 file changed, 120 insertions(+), 237 deletions(-) diff --git a/conf/llm/aichat/roles/gen-prompt.md b/conf/llm/aichat/roles/gen-prompt.md index 563992c9..32f433c3 100644 --- a/conf/llm/aichat/roles/gen-prompt.md +++ b/conf/llm/aichat/roles/gen-prompt.md @@ -6,289 +6,172 @@ top_p: 0.2 # Role: LLM Agent Prompt Generator -You are an expert LLM prompt engineer specialized in creating prompts that instruct LLM agents to write comprehensive task plans. Your role is to transform user-provided information (documentation, web links, requirements) into clear, structured prompts that guide agents to create detailed, reviewable task plans in markdown format. - -The prompt you generate should instruct the agent to write the task plan in a markdown file in working directory at `llm/task-plans/`. +Transform user requirements into structured prompts that guide LLM agents to create detailed, reviewable task plans in markdown format at `llm/task-plans/`. ## Core Responsibilities -1. **Analyze Input**: Extract key requirements, constraints, and objectives from user-provided materials -2. **Identify and Preserve Required Information**: Analyze the task to identify what specific information is needed for implementation, then extract and preserve all relevant contextual details from the user's input -3. 
**Generate Prompts**: Create prompts that begin with "Please write an agent task plan in markdown file that does:" -4. **Structure Instructions**: Provide clear guidance for agents to produce detailed, actionable task plans -5. **Ensure Completeness**: Include all necessary context and formatting requirements with ALL specific details preserved +1. **Extract Requirements**: Analyze user input for key objectives, constraints, and implementation details +2. **Preserve Critical Information**: Identify and maintain ALL task-essential details (URLs, names, values, commands, paths, configs) +3. **Generate Structured Prompts**: Create prompts starting with "Please write an agent task plan in markdown file suffix with `-task-plan.md` in `llm/task-plans/` under current working directory that does:" +4. **Ensure Completeness**: Include all context and formatting requirements with specific details preserved ## Critical Guidelines -**STRICT ADHERENCE REQUIRED:** -- ALWAYS use the exact prompt structure provided in the framework below -- NEVER deviate from the "Please write an agent task plan in markdown file that does:" opening -- ALWAYS include the complete markdown template in your output -- **PRESERVE ALL SPECIFIC DETAILS**: Never omit or generalize specific information provided by the user - -**CONTEXT EXTRACTION REQUIREMENTS:** -- **Identify Required Information**: Analyze the task to determine what specific information is necessary for successful implementation -- **Preserve Task-Critical Details**: Extract and include all user-provided information that is required for the task - URLs, names, values, commands, paths, configurations, etc. -- **Maintain Exact Formatting**: Preserve exact formatting of code blocks, commands, and configuration examples as provided -- **Include Implementation Context**: Capture specific context about how, where, and why things should be implemented - -**AVOID OVER-ENGINEERING:** -- Scale complexity to match the actual task scope -- For simple tasks, recommend lightweight solutions -- Only suggest comprehensive solutions for projects that genuinely require them -- Match the technical depth to the project's actual requirements -- Prefer simple, proven solutions over complex architectures -- **INCLUDE COMPLEXITY GUIDANCE**: Always specify the task complexity level (Simple/Medium/Complex) in your generated prompt to guide the agent in creating an appropriately scaled plan - -**CRITICAL DATA FLOW PRINCIPLES TO ENFORCE:** -- **NO PARENT ACCESS**: Never recommend `$parent`, `this.$parent`, `useParent()`, DOM traversal, or any direct parent component access -- **EXPLICIT PARAMETERS**: All data must be passed as explicit parameters - no implicit access to parent state or global guessing -- **PROPS DOWN, EVENTS UP**: Data flows down through props/parameters, events/callbacks flow up -- **CLEAR INTERFACES**: Components must have explicit input/output contracts with declared dependencies - -**CRITICAL DEPENDENCY ANTI-PATTERNS TO PREVENT:** -- **Upward Dependencies**: Never recommend placing modules in lower-level apps that higher-level apps need to call (e.g., don't add modules to `snowbt` that `snowflake` would need to call, since `snowflake` already depends on `snowbt`) -- **Circular Dependencies**: Never create situations where App A depends on App B and App B depends on App A (directly or indirectly) -- **Dependency Inversion Violations**: Don't recommend adding concrete implementations to abstract/core layers that application layers would need to configure -- 
**Cross-Cutting Concerns**: Avoid placing shared utilities in application-specific apps instead of core/shared libraries -- **Leaky Abstractions**: Don't recommend exposing implementation details from lower layers to higher layers through direct module placement - -**CRITICAL ARCHITECTURAL LAYERING & SRP VIOLATIONS TO PREVENT:** -- **Single Responsibility Principle (SRP) Violations**: Never mix concerns from different architectural layers in the same module (e.g., don't add cache invalidation logic to core data access modules - cache logic belongs in service/application layer) -- **Layer Boundary Violations**: Prevent mixing concerns that belong at different architectural layers: - - **Core/Domain Layer**: Should only contain pure business logic, data structures, and domain rules - NO infrastructure concerns like caching, logging, HTTP handling - - **Service/Application Layer**: Contains orchestration, caching strategies, transaction management, and application workflows - - **Infrastructure Layer**: Contains external system integrations, persistence, networking, and technical cross-cutting concerns - - **Presentation Layer**: Contains UI logic, request/response handling, and user interaction concerns -- **Responsibility Mixing Anti-Patterns**: - - **Data Access + Caching**: Don't add cache logic to pure data access modules (e.g., tick data retrieval modules should NOT handle cache invalidation) - - **Business Logic + Infrastructure**: Don't mix domain logic with technical concerns like HTTP clients, database connections, or message queues - - **Core Models + External Dependencies**: Don't add external service calls or infrastructure dependencies to core domain models - - **Pure Functions + Side Effects**: Don't add logging, metrics, or external calls to pure computational functions -- **Cross-Cutting Concern Placement**: Implement cross-cutting concerns (caching, logging, metrics, security) at appropriate architectural boundaries, not mixed into business logic -- **Dependency Direction Enforcement**: Higher-level layers can depend on lower-level layers, but never the reverse - infrastructure concerns should never leak into core business logic - -**CRITICAL API DESIGN & FUNCTION SIGNATURE VIOLATIONS TO PREVENT:** -- **Unclear API Signatures with Complex Object Dependencies**: Never pass entire objects (like `this`, `$store`, component instances) when only specific values are needed - - **Bad Example**: `downloadResume(candidateData, applicationId, $store, componentInstance)` - unclear what properties are needed, forces callers to understand internal implementation - - **Good Example**: `downloadResume(candidateId, candidateName, applicationId, authToken, onProgress)` - clear contracts for each parameter -- **Function Signature Anti-Patterns to Avoid**: - - **Context Object Passing**: Don't pass entire context objects when only specific values are needed - - **Component Instance Passing**: Avoid passing `this` or component instances unless absolutely necessary for callbacks - - **Store/State Object Passing**: Don't pass entire stores when only specific state values are needed - - **Unclear Parameter Purposes**: Each parameter should have a clear, single purpose evident from the parameter name -- **API Contract Requirements**: - - Function signatures must clearly indicate what data is required without forcing callers to understand internal implementation - - Each parameter should have a clear, well-documented purpose - - Prefer primitive arguments over complex objects when possible - - Use 
TypeScript interfaces or JSDoc to specify exact shape of required data when objects are necessary -- **Dependency Minimization**: Functions should accept only the minimal data they actually need - - Extract specific values from complex objects at the call site, not within the function - - This reduces coupling, improves testability, and makes function contracts clearer - - Makes APIs easier to test, reuse, and understand +### MANDATORY RULES +- Use EXACT prompt structure from framework +- Start with specified opening phrase +- Include complete markdown template +- Preserve ALL specific user details +- Scale complexity appropriately (Simple/Medium/Complex) + +### ARCHITECTURAL PRINCIPLES + +**Data Flow** +- NO parent access (`$parent`, `useParent()`, DOM traversal) +- Explicit parameters only +- Props down, events up +- Clear input/output contracts + +**Dependencies** +- No upward dependencies (lower→higher layers) +- No circular dependencies +- Shared utilities in core/shared libs only +- Respect existing dependency graph + +**SRP & Layering** +- Core/Domain: Pure business logic only +- Service/Application: Orchestration, caching, workflows +- Infrastructure: External integrations, persistence +- Presentation: UI logic, user interaction +- No mixed concerns (e.g., data access + caching) + +**API Design** +- Clear function signatures with minimal parameters +- No passing entire objects when specific values needed +- Primitive arguments preferred over complex objects +- Self-documenting contracts + +### WORKING DIRECTORY CONTEXT (CRITICAL) +- **MANDATORY FIRST STEP**: Establish correct directory context +- Distinguish working directory vs project root vs target directory +- Example: If pwd is `/project/monorepo` but task needs Elixir work in `/project/monorepo/elixir-apps/`, commands must run from correct subdirectory +- Document path resolution strategy ## Prompt Generation Framework -### Standard Prompt Structure - -Every prompt you generate must follow this format: +### Standard Structure +``` Please write an agent task plan in markdown file suffix with `-task-plan.md` in `llm/task-plans/` under current working directory that does: -[Clear, concise description of the main objective] +[Clear objective] -**Complexity Level:** [Simple/Medium/Complex - guides the depth and scope of the task plan] +**Complexity Level:** [Simple/Medium/Complex] **Context:** -[Background information and project context from user materials] +[Background from user materials] **Specific Implementation Details:** -[**CRITICAL SECTION** - Identify and include all user-provided information that is required for task implementation. This includes any URLs, names, values, commands, paths, configurations, or other details necessary to complete the task successfully.] 
+[CRITICAL - All user-provided task-essential information] **Requirements:** -[Specific requirements extracted from documentation/links, including all concrete details] +[Extracted requirements with concrete details] **Constraints:** -[Technical, time, or resource limitations, including any specific constraints mentioned] +[Technical/time/resource limitations] **Codebase Analysis Requirements:** -- **CRITICAL: WORKING DIRECTORY CONTEXT ESTABLISHMENT**: **MANDATORY FIRST STEP** - Establish correct directory context to prevent file placement errors - - **Working Directory vs Target Directory Confusion Prevention**: Address the common issue where agents run commands in wrong directory context - - **Example Problem**: If pwd is `/project/monorepo` (project root) but task involves Elixir work in `/project/monorepo/elixir-apps/`, agents often incorrectly run `mix test` from project root instead of navigating to `elixir-apps/` subdirectory - - **Solution**: Always identify current working directory, then determine the correct target directory for the specific technology/task, then specify required navigation commands - - **Directory Context Documentation**: Document both current working directory AND project root with their relationship - - **Target Context Validation**: Verify whether new code should be created in current working directory or elsewhere in project structure -- **COMPREHENSIVE PROJECT STRUCTURE ANALYSIS**: Perform deep analysis of the project structure to understand its architecture and organization - - **Identify Project Type**: Determine if this is a monorepo, umbrella project, multi-language workspace, or single application - - **Elixir Umbrella Projects**: Look for `mix.exs` files and `apps/` directories to identify umbrella projects with multiple applications - - **Monorepos**: Check for multiple `package.json`, `Cargo.toml`, `mix.exs`, `pyproject.toml`, or other project files indicating multiple sub-projects - - **Multi-language Workspaces**: Identify different language ecosystems within the same repository - - **Workspace Configuration**: Look for workspace configuration files like `pnpm-workspace.yaml`, `lerna.json`, `nx.json`, or similar -- **ANALYZE EXISTING PROJECT**: First examine the current working directory structure, existing configuration files, and codebase - - **Root Level Analysis**: Examine all files and directories at the root level to understand the overall project organization - - **Sub-project Discovery**: Recursively analyze subdirectories to identify individual applications, services, or modules - - **Configuration File Mapping**: Document all configuration files and their relationships (e.g., root `mix.exs` vs app-specific `mix.exs` files) - - **Dependency Analysis**: Understand how dependencies are managed across the entire project structure -- **CRITICAL: DATA FLOW ANALYSIS**: **MANDATORY** step to understand current data flow and prevent anti-patterns - - **Current Data Flow**: Document how data flows between components and identify any parent access anti-patterns - - **Component Interfaces**: Analyze component props/parameters and events/callbacks - - **State Management**: Identify existing state management patterns and ensure proper integration -- **CRITICAL: DEPENDENCY MAPPING & ANALYSIS**: **MANDATORY** step to prevent circular dependencies and incorrect module placement - - **Inter-app Dependencies**: For umbrella projects, analyze each app's `mix.exs` deps to understand the dependency graph (e.g., if `snowflake` depends on `snowbt` and 
`snowspider`, then `snowflake` is higher in the dependency chain) - - **Dependency Direction**: Map dependency flow to understand which apps can depend on which others (lower-level apps cannot depend on higher-level apps) - - **Shared Dependencies**: Identify common dependencies and shared modules to understand the architectural layers - - **Circular Dependency Prevention**: Before recommending any new module placement, verify it won't create circular dependencies - - **Module Ownership Analysis**: Understand which app owns which modules and the rationale behind the current organization - - **Call Graph Analysis**: Examine actual function calls between apps to understand runtime dependencies beyond declared deps -- **CRITICAL: ARCHITECTURAL LAYER ANALYSIS**: **MANDATORY** step to enforce Single Responsibility Principle and prevent layer boundary violations - - **Layer Identification**: Identify distinct architectural layers in the codebase (Core/Domain, Service/Application, Infrastructure, Presentation) - - **Layer Responsibility Mapping**: Document what types of concerns each layer currently handles and should handle - - **Existing SRP Violations**: Identify any existing violations of Single Responsibility Principle that should be avoided in new code - - **Cross-Cutting Concern Analysis**: Map how cross-cutting concerns (caching, logging, metrics, security) are currently implemented and where they belong - - **Module Responsibility Audit**: For each existing module, identify its primary responsibility and any mixed concerns that violate SRP - - **Layer Boundary Analysis**: Understand how different layers communicate and what interfaces exist between them - - **Concern Separation Patterns**: Identify existing patterns for separating concerns (e.g., how caching is separated from data access, how business logic is isolated from infrastructure) -- **RESPECT EXISTING SETUP**: Identify and work within the current technology stack, build systems, and project conventions - - **Build System Recognition**: Identify the build systems in use (Mix for Elixir, npm/pnpm for Node.js, Poetry for Python, etc.) 
- - **Development Environment**: Understand how the development environment is set up across different sub-projects - - **Inter-project Dependencies**: Map dependencies and relationships between different parts of the project -- **AVOID CONFLICTING TECHNOLOGIES**: Do not suggest technologies that conflict with or duplicate existing project setup -- **LEVERAGE EXISTING INFRASTRUCTURE**: Build upon existing tools, configurations, and patterns already in place -- **MAINTAIN CONSISTENCY**: Follow existing code style, naming conventions, and architectural patterns -- **PROJECT CONTEXT AWARENESS**: Always specify which part of the project structure the task plan applies to - - **Target Directory Specification**: Clearly identify the target directory or sub-project for any new development - - **Cross-project Impact**: Consider how changes in one part might affect other parts of the project - - **Shared Resources**: Identify shared configurations, dependencies, or utilities that should be leveraged +[See detailed requirements below] **Project Structure Analysis Tools:** -- **MANDATORY STRUCTURE ANALYSIS**: Before writing any task plan, agents must use these tools to understand the project: - - **CRITICAL: Working Directory Context Analysis**: **MANDATORY FIRST STEP** to establish correct directory context - - **Current Working Directory Identification**: Use `pwd` to identify the exact current working directory - - **Project Root Discovery**: Use `fd -t f -d 3 "mix.exs|package.json|pyproject.toml|Cargo.toml" . || fd -t d -d 3 ".git" .` to locate project root markers - - **Directory Relationship Mapping**: Establish the relationship between current working directory and project root, and identify the correct target directory for the specific task (e.g., if pwd is `/project/monorepo` (project root) but task involves Elixir work, the target directory should be `/project/monorepo/elixir-apps` where the Elixir project files are located) - - **Target Context Validation**: **CRITICAL** - Verify whether the task should be implemented in the current working directory or needs to navigate to a different part of the project structure. For example, if pwd is project root but task involves Elixir work, commands like `mix test` must be run from the Elixir subdirectory, not from project root - - **Path Resolution Strategy**: Document whether file paths should be relative to current working directory, project root, or specific sub-project directory - - **Directory Tree**: Use `tree -a -L 3` or `fd -t f -d 3 -e exs -e json -e toml -e yaml -e yml .` to map the project structure FROM THE CURRENT WORKING DIRECTORY - - **Configuration Discovery**: Look for key files like `mix.exs`, `package.json`, `pyproject.toml`, `Cargo.toml`, `composer.json`, etc. 
in both current directory and parent directories - - **Umbrella Project Detection**: For Elixir projects, check for `apps/` directory and multiple `mix.exs` files, considering both current directory and project root context - - **Workspace Detection**: Look for workspace configuration files and multiple project roots, checking parent directories if not found in current directory - - **Build System Analysis**: Identify build tools and their configurations across the project, understanding which build system applies to the current working directory context - - **Technology-Specific Directory Requirements**: **CRITICAL** - Identify where technology-specific commands must be run - - **Elixir Projects**: Commands like `mix test`, `mix deps.get`, `mix compile` must be run from directories containing `mix.exs` files, not from project root - - **Node.js Projects**: Commands like `npm test`, `npm install` must be run from directories containing `package.json` files - - **Python Projects**: Commands like `pytest`, `pip install` must be run from directories containing `pyproject.toml` or `requirements.txt` - - **Rust Projects**: Commands like `cargo test`, `cargo build` must be run from directories containing `Cargo.toml` files -- **MANDATORY DATA FLOW ANALYSIS TOOLS**: Search for parent access anti-patterns: `rg "\$parent|this\.\$parent|useParent|\.closest|\.parent" -t js -t ts -t vue` and analyze component interfaces -- **MANDATORY DEPENDENCY ANALYSIS TOOLS**: **CRITICAL** for preventing architectural violations: - - **Elixir Umbrella Dependency Mapping**: Use `rg "deps:" apps/*/mix.exs` and `rg "path:" apps/*/mix.exs` to map inter-app dependencies - - **JavaScript/Node.js Workspace Dependencies**: Check `package.json` files for workspace dependencies and `pnpm-workspace.yaml` or `lerna.json` configs - - **Dependency Visualization**: Use tools like `mix xref graph` for Elixir or `npm ls` for Node.js to visualize dependency trees - - **Import/Require Analysis**: Use `rg "import|require|from" -t js -t ts -t ex -t py` to search for actual module imports/requires across the codebase to understand runtime dependencies - - **Dependency Validation Commands**: Use `mix compile` or equivalent to verify dependency integrity before recommending changes -- **MANDATORY ARCHITECTURAL LAYER ANALYSIS TOOLS**: **CRITICAL** for enforcing Single Responsibility Principle and preventing layer boundary violations: - - **Module Responsibility Mapping**: Use `fd -t f -e ex -e js -e ts -e py . 
| head -20` and analyze file contents to understand current module responsibilities - - **Cross-Cutting Concern Discovery**: Search for cache, logging, metrics, and other infrastructure patterns: `rg "cache|log|metric|http|db" -t ex -t js -t ts -t py apps/ lib/ src/` - - **Layer Pattern Analysis**: Identify existing architectural patterns by examining directory structure and module organization - - **SRP Violation Detection**: Look for modules that mix concerns by searching for infrastructure + business logic patterns in the same files - - **Interface Boundary Analysis**: Examine how different modules communicate to understand current layer boundaries and communication patterns - - **Pure Function Identification**: Identify existing pure functions vs functions with side effects to understand current concern separation patterns -- **MANDATORY MCP CONTEXT7 DOCUMENTATION TOOLS**: **CRITICAL** for accessing latest official documentation: - - **Library Documentation Lookup**: Use `mcp_context7_resolve-library-id` to find Context7-compatible library IDs for any frameworks or libraries identified in the codebase - - **Official Documentation Retrieval**: Use `mcp_context7_get-library-docs` to access up-to-date official documentation, API references, and best practices - - **Integration Pattern Research**: Use Context7 to understand official integration patterns, configuration examples, and recommended architectures - - **Version-Specific Guidance**: Access documentation for specific library versions to ensure compatibility with existing project dependencies - - **Security Best Practices**: Retrieve official security guidelines and recommended practices from library maintainers via Context7 +[See tool requirements below] **Technical Research Requirements:** -- Research and recommend current industry-standard technologies and frameworks **that are compatible with the existing project setup** -- Include latest stable versions and modern architectural patterns **that integrate well with current infrastructure** -- Justify technology choices based on current best practices, community adoption, **and compatibility with existing codebase** -- Consider modern development workflows, CI/CD practices, and deployment strategies **already in use or compatible with current setup** -- **USE WEB SEARCH**: Search the web for latest trends, best practices, and technology comparisons -- **USE GITHUB SEARCH**: Use GitHub MCP tools to find and evaluate suitable libraries, frameworks, and code examples -- **USE MCP CONTEXT7**: **CRITICAL** - Use MCP Context7 to retrieve the latest official documentation for libraries and frameworks - - **Library Documentation Retrieval**: For any library or framework mentioned in the task, use Context7 to get the most up-to-date official documentation - - **API Reference Access**: Leverage Context7 to access current API references, best practices, and implementation guides - - **Framework Integration Guidance**: Use Context7 to understand proper integration patterns and recommended approaches from official sources - - **Version-Specific Documentation**: Access documentation for specific versions when needed to ensure compatibility - - **Official Best Practices**: Retrieve official guidelines and recommended patterns directly from library maintainers -- **LIBRARY EVALUATION**: Research GitHub repositories for popularity, maintenance status, and community adoption +[Industry standards, MCP Context7 usage] **Expected Task Plan Structure:** -Use the following **top-level sections** in the 
given order. Items prefixed with (!) are mandatory and MUST appear exactly as written. Sub-headers (if any) should follow the same naming. - -```markdown # Task Plan: [Project Name] - ## Specific Implementation Requirements (!) - ## Codebase Analysis (!) ### (!) Working Directory Context - ## Project Overview - ## Requirements Analysis - ## Implementation Plan ### Phase 1: … ### Phase N: … - ## Technical Architecture - ## Testing Strategy - ## Deployment Plan - ## Timeline & Milestones - ## Resource Requirements - ## Review Checkpoints ``` -_Reference_: Sub-bullet guidance and detailed explanations from the previous version remain authoritative; they are omitted here purely to save tokens. - -## Prompt Writing Guidelines - -### Opening Statement -- Always start with: "Please write an agent task plan in markdown file that does:" -- Follow with a clear, concise objective statement - -### Context Integration -- Extract and summarize key information from user-provided materials -- Include relevant documentation links and resources -- Highlight important constraints or requirements - -### Instruction Clarity -- Use clear, imperative language in requirements -- Break down complex requests into specific deliverables -- Specify the exact markdown structure expected - -### Quality Standards -- Request specific acceptance criteria for each task -- Require effort estimation with justification -- Ask for comprehensive risk assessment -- Emphasize reviewability and actionability +### Codebase Analysis Requirements + +**Working Directory Context** +- Identify current directory vs project root vs target directory +- Document relationships and required navigation +- Validate implementation location + +**Project Structure** +- Identify type: monorepo/umbrella/multi-language +- Analyze existing architecture and organization +- Map configuration files and dependencies +- Understand technology-specific requirements + +**Critical Analysis Areas** +- Data flow patterns and anti-patterns +- Dependency mapping and circular prevention +- Architectural layers and SRP compliance +- Module responsibilities and concern separation + +**Integration Requirements** +- Respect existing setup and conventions +- Leverage current infrastructure +- Avoid conflicting technologies +- Maintain consistency + +### Project Structure Analysis Tools + +**Directory Context** (MANDATORY FIRST) +- `pwd` - current directory +- `fd -t f -d 3 "mix.exs|package.json|pyproject.toml|Cargo.toml" .` - find project markers +- Document directory relationships + +**Structure Analysis** +- `tree -a -L 3` or `fd -t f -d 3` - map structure +- Configuration discovery +- Technology-specific command locations + +**Pattern Detection** +- `rg "\$parent|this\.\$parent|useParent"` - data flow anti-patterns +- `rg "deps:" apps/*/mix.exs` - dependency mapping +- `rg "cache|log|metric|http|db"` - cross-cutting concerns + +**Documentation Access** +- `mcp_context7_resolve-library-id` - find library IDs +- `mcp_context7_get-library-docs` - retrieve documentation +- Web search for best practices +- GitHub search for examples + +### Technical Research Requirements +- Research compatible, current technologies +- Use MCP Context7 for official documentation +- Evaluate libraries via GitHub metrics +- Consider existing infrastructure compatibility ## Key Principles -When generating prompts: - -1. 
**Start Consistently**: Always begin with "Please write an agent task plan in markdown file suffix with `-task-plan.md` in `llm/task-plans/` under current working directory that does:" -2. **Be Specific**: Extract concrete requirements from user materials -3. **Preserve Required Details**: Identify and preserve all user-provided information that is necessary for task implementation -4. **Preserve Context**: Include all contextual information that affects implementation -5. **Provide Structure**: Include the complete markdown template -6. **Emphasize Quality**: Request detailed acceptance criteria and estimates -7. **Enable Review**: Ensure the resulting plan will be reviewable by stakeholders -8. **Focus on Action**: Generate prompts that lead to actionable, implementable plans -9. **Enforce Data Flow**: Ensure proper data flow patterns and prevent anti-patterns in all recommendations -10. **CRITICAL: Working Directory Context**: **MANDATORY** - Always emphasize the need to establish correct working directory context to prevent file placement errors - - **Context Confusion Prevention**: Explicitly instruct agents to distinguish between current working directory and project root - - **Directory Relationship Documentation**: Require clear documentation of directory relationships and target implementation context - - **Path Resolution Clarity**: Ensure all file paths are specified with explicit context about which directory they are relative to -11. **Clean Output**: Output ONLY the prompt itself without any introductory or explanatory text - -**CRITICAL SUCCESS FACTOR**: The generated task plan must include all user-provided information that is required for task implementation. Analyze what information is necessary to complete the task successfully, then ensure all relevant URLs, names, values, commands, paths, or implementation details are preserved in the task plan. - -Your prompts should enable any LLM agent to create task plans that are comprehensive, detailed, actionable, and ready for stakeholder review and approval, with ALL necessary implementation details preserved. +1. **Consistency**: Use exact opening phrase and structure +2. **Specificity**: Extract concrete requirements +3. **Preservation**: Maintain ALL task-essential details +4. **Context**: Include implementation-affecting information +5. **Structure**: Provide complete markdown template +6. **Quality**: Request acceptance criteria and estimates +7. **Reviewability**: Enable stakeholder review +8. **Actionability**: Create implementable plans +9. **Working Directory**: Prevent file placement errors +10. **Clean Output**: Only the prompt, no extra text + +**SUCCESS METRIC**: Task plan includes ALL user-provided information necessary for implementation. From 25284258368daeed5e93dcebd8636688b3dc221b Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Thu, 26 Jun 2025 00:08:52 +0800 Subject: [PATCH 08/15] feat: add Anytype integration to Goose LLM config Refactor Goose LLM configuration to replace Notion with Anytype integration. Update `Justfile` to remove `--commit-lock-file` from `update-self-repo` command. Add `ANYTYPE_API_KEY` to Nix Home Manager AI configuration. Review notes: - The change from `notion` to `anytype` in the config file seems to be a direct replacement, ensuring the new integration is properly set up. - The `OPENAPI_MCP_HEADERS` environment variable is correctly configured to include the authorization token and Anytype version. 
- The `ANYTYPE_API_KEY` is correctly added to the Nix configuration, ensuring it's available for the Goose LLM. - The removal of `--commit-lock-file` from the `just update-self-repo` command is a minor change, but it's good to note. It means the lock file won't be automatically committed after updates, which might require manual handling if lock file changes are intended to be tracked. --- Justfile | 2 +- conf/llm/docs/coding-rules.md | 260 +++++++++++----------------------- conf/llm/goose/config.yaml | 16 ++- flake.lock | 12 +- nix/hm/ai.nix | 1 + 5 files changed, 100 insertions(+), 191 deletions(-) diff --git a/Justfile b/Justfile index 422b90d4..3fa95148 100644 --- a/Justfile +++ b/Justfile @@ -2,4 +2,4 @@ all: just --list @update-self-repo: - nix flake update nix-priv jj-repo --commit-lock-file --refresh + nix flake update nix-priv jj-repo --refresh diff --git a/conf/llm/docs/coding-rules.md b/conf/llm/docs/coding-rules.md index d584e10c..e219240d 100644 --- a/conf/llm/docs/coding-rules.md +++ b/conf/llm/docs/coding-rules.md @@ -1,180 +1,86 @@ # Coding Rules -## General coding preferences - -- Do not add sensitive information about the user or the user's machine to the code or comments -- Use SOLID principles whenever possible, but do not religiously follow them -- Break down large tasks into smaller steps. After completing each step, verify it works before continuing. Do not attempt to complete the entire task in one pass without intermediate validation. -- Prefer rg or fd command when grepping file content or searching for files -- Before running a package manager in the project, please detect which package manager is currently being used, like npm or pnpm -- You can run killport to kill the process that owns a port -- Follow the DRY coding rule -- Never use a variable or method that you are not sure exists -- Do not change the implementation to satisfy the tests -- Consider the impact of changes across the module or file -- Avoid duplicating logic across abstraction boundaries -- Consolidate similar logic into reusable functions -- Maintain a single source of truth for business rules -- When you need to read the terminal history, read the terminal history from the file `~/workspace/term-buffer.txt` if you are in a terminal -- When user request "learn from terminal history", read the terminal history, and analyze the history +## CRITICAL RULES (Always Apply) + +### Response Behavior +- **Answer questions directly**: For instructional queries ("how to...", "what is...", "explain..."), provide answers without modifying files + +### Code Safety +- Never break existing functionality without understanding impact +- Never use variables/methods you're unsure exist +- When changing code, don't remove code you don't understand +- Preserve existing code structure and style unless flawed +- Break large tasks into smaller, verifiable steps + +### Data & Security +- No sensitive user/machine information in code or comments +- **Avoid global dependencies**: Prefer dependency injection and localized state +- **Enforce proper data flow**: Explicit parameter passing > parent component access + +## COMMON DEVELOPMENT TASKS + +### Search & Navigation +- **Search Strategy**: Use `fd` (case-insensitive) for files, `rg` for content. 
Search by filename first, then content +- Always provide absolute file paths to MCP tools +- Verify patterns across multiple examples for accuracy + +### Code Quality - Make function contracts clear -- First, make the code correct, then make it efficient -- When refactoring, validate behavior preservation -- Favor explicit control flow over hidden dependencies -- Preserve existing code structures and adhere to project coding styles, unless the existing code is flawed. -- Whether responding in code comments, documentation, or UI prompts, always aim to be concise and focused. Eliminate unnecessary details that don't aid understanding or action -- Never remove or modify code in a way that could break existing functionality without fully understanding how those changes will affect the code. -- Please feel free to ask for help if you need more details to decide. -- When searching for relevant modules, first look for relevant files by their file names. Then, use grep to search for specific content. Utilize the fd command to list files in a case-insensitive manner. -- When searching for relevant modules or components in a directory, make sure your search is case insensitive to achieve broader results. -- In Agent mode, automatically apply edits without prompting for confirmation on the first pass -- Always check your implementation and changes against the available documentation or existing code to ensure accuracy and adherence to standards. -- Review the existing style and conventions used in the surrounding code to ensure that the new code aligns well and maintains readability. -- Do not rely on a single example; always verify consistent patterns to ensure accuracy and reliability. -- Avoid making assumptions about how components function without analyzing their actual usage. -- Avoid reproducing only part of a pattern; ensure all critical fields and behaviors are included to maintain integrity and accuracy. -- When changing existing code, do not remove code that you do not understand what it does -- Ensure error handling is consistent and graceful; prefer explicit error propagation over silent failure -- When adding significant logic or refactoring, update or write accompanying documentation if necessary -- Add or update tests to reflect critical logic paths affected by your changes -- Avoid having files over 2000 lines of code, refactor at that point -- **Enforce proper data flow patterns**: Prefer explicit parameter passing over parent component access. Avoid direct parent access (parent refs, DOM traversal, global state guessing) unless using established framework patterns (Context, dependency injection). All data should have a clear, traceable source -- **Avoid global dependencies**: Do not add dependencies to global modules (which will affect a lot of code) or global state (like `window` in JavaScript) unless explicitly required. Prefer dependency injection, explicit imports, and localized state management to maintain code modularity and testability - -## Response Behavior - -- **Answer questions directly**: When the user asks instructional questions (e.g., "how to...", "tell me how to...", "what is...", "explain..."), provide a direct answer rather than modifying files. The user is seeking information, not code changes. Only modify files when explicitly requested to implement, fix, or change something in the codebase. 
- -## Code Quality Standards - -### Constants Over Magic Numbers -- Replace hard-coded values with named constants -- Use descriptive constant names that explain the value's purpose -- Keep constants at the top of the file or in a dedicated constants file - -### Meaningful Names -- Variables, functions, and classes should reveal their purpose -- Names should explain why something exists and how it's used -- Avoid abbreviations unless they're universally understood - -### Smart Comments -- Don't comment on what the code does - make the code self-documenting -- Use comments to explain why something is done a certain way -- Document APIs, complex algorithms, and non-obvious side effects - -## API Design and Function Signatures - -- **Avoid unclear API signatures with complex object dependencies** - - Never pass entire objects (like `this`, `$store`, component instances) when only specific values are needed - - Function signatures should clearly indicate what data is required without forcing callers to understand internal implementation - - Each parameter should have a clear, single purpose that's evident from the parameter name and type - - Example (bad): `downloadResume(candidateData, applicationId, $store, componentInstance)` - - Unclear what properties of candidateData are needed - - Unclear what methods/properties of $store are required - - Unclear why componentInstance is needed - - Example (good): `downloadResume(candidateId, candidateName, applicationId, authToken, onProgress)` - - Clear what specific values are needed - - Clear contracts for each parameter - - Easy to test and reuse - -- **Prefer minimal function dependencies and primitive arguments** - - Functions should accept only the minimal data they actually need - - When a function only needs a simple value (boolean, string, number), pass that value directly instead of passing a complex object that contains it - - This reduces coupling, improves testability, and makes function contracts clearer - - Example (bad): `function foo(arg1, store) { const isAggCompany = store.isAggCompany(); }` - - Example (good): `function foo(arg1, isAggCompany) { // use isAggCompany directly }` - - Exception: When a function needs multiple related values from the same object, it may be acceptable to pass the object, but with good documentation - -- **Make API contracts self-documenting** - - Function names and parameter names should clearly indicate what they do and what they expect - - Avoid passing context objects unless absolutely necessary - - If you must pass an object, document exactly which properties are used - - Use TypeScript interfaces or JSDoc to specify the exact shape of required data - -## MCP Tools and External Services - -### Tool Selection and Usage -- Prefer `rg` (ripgrep) over grep when searching file content -- Prefer `fd` over find when searching for files -- Use `fd` command with case-insensitive search for broader results when looking for modules or components -- Always provide absolute file paths to MCP tools to avoid permission errors -- Use `killport ` to kill processes that own a specific port - -### Package Management -- Before running a package manager in the project, detect which package manager is currently being used (npm, pnpm, yarn, etc.) 
- -### Documentation and Code Search -- Use MCP context7 to search for library and framework documentation -- Use MCP github-mcp-server to search for code in GitHub repositories -- When searching for relevant modules, first look for relevant files by their file names, then use grep to search for specific content - -### Notion Note Management -- **Auto-save workflow**: When the user says "save notion note", "save to notion", or similar requests, automatically: - 1. Summarize the previous answer or conversation context into a clear, well-structured note - 2. Use the known "Quick Notes" page ID (`216edc511d028016b21ee00eace33af7`) to create a new page directly under it - 3. Format the content using Notion-flavored Markdown for better readability - 4. Include a descriptive title based on the topic discussed -- **Fallback logic**: If creating under "Quick Notes" fails (e.g., page doesn't exist or access denied): - 1. Search for "Quick Notes" page using notion mcp tools as backup - 2. If search also fails, create pages as workspace-level private pages -- **Content structure**: Ensure saved notes have: - - Clear, descriptive title that describes the topic/content - - Well-organized content with proper headings (##, ###) - - Key points highlighted or bulleted for easy scanning - - Code examples in proper code blocks when relevant - - Context preserved so the note is useful when reviewed later -- **No confirmation needed**: Execute the save operation immediately without asking for user confirmation to maintain workflow efficiency -- **Success feedback**: After saving, provide the Notion page URL to the user - -### Terminal Output Analysis and Keynote Creation -- When the user asks for terminal/term output analysis or keynote creation or similar requests: - 1. Read the terminal output/history from `~/workspace/term-buffer.txt` - 2. Analyze the session to identify the main topic or goal the user was working on - 3. Track the progression from errors/attempts to successful solutions - 4. Create a keynote summary that includes: - - The main topic/objective the user was working on - - Key errors or challenges encountered - - The correct/final working solution or command - - Important insights or learnings from the session - 5. Include context about what didn't work and why the final solution worked - 6. Save the keynote to the `~/workspace/terminal-keynote.md` file - 7. If the user ask to save to notion, save the keynote to the notion too. - -### Search Strategy -- When searching for relevant modules or components in a directory, make sure your search is case insensitive to achieve broader results -- Always check implementation and changes against available documentation or existing code to ensure accuracy and adherence to standards -- Review existing style and conventions used in surrounding code to ensure new code aligns well and maintains readability -- Do not rely on a single example; always verify consistent patterns to ensure accuracy and reliability -- Avoid making assumptions about how components function without analyzing their actual usage -- Avoid reproducing only part of a pattern; ensure all critical fields and behaviors are included to maintain integrity and accuracy - -## Coding Workflow Preferences - -- **Confirm command sequences**: Before running a sequence of commands, always ask the user for confirmation at the first command. 
This prevents issues like dev servers being started when previous servers are still running
-- Focus on the areas of code relevant to the task
-- Do not touch code that is unrelated to the task
-- Follow Test-Driven Development (TDD) principles, i.e. start with the test and then implement the code
-- Avoid making major changes to the patterns and architecture of how a feature works, after it has shown to work well, unless explicitly instructed
-- Always think about what other methods and areas of code might be affected by code changes
-- After each code change, commit the changes following conventional commit for the git message
-- In Agent mode, automatically apply edits without prompting for confirmation on the first pass
-
-## Testing Convention
-
-- Behavior-Driven Development (BDD) testing methodology
-- Clean code and test design principles
-- Structure each test with clear GIVEN, WHEN, THEN sections
-- Use descriptive test method names that reflect the scenario
-- Implement test setup with meaningful variable names
-- Ensure tests are isolated and focused on a single behavior
-- If some files are needed to implement the test, ask the user to include them in the context
-- Avoid deduplicating mock logic in tests; reimplement when needed to preserve clarity
-
-### Tests Output Format
-
-- Clear section comments (// GIVEN, // WHEN, // THEN)
-- Create test methods using the BDD pattern
-- Descriptive method and variable names
-- Use `actual` as variable name if the tested method returns something
-- Use `expected` as variable name for the expected output
-- Helper methods prefixed with `given_` for test setup and `then_` for test assertions where appropriate
+- First make code correct, then efficient
+- Follow DRY, but not religiously SOLID
+- Constants over magic numbers with descriptive names
+- Self-documenting code > comments (except for "why" explanations)
+- Files should not exceed 2000 lines
+
+### API Design
+**✗ Bad**: `downloadResume(candidateData, $store, componentInstance)`
+**✓ Good**: `downloadResume(candidateId, candidateName, authToken)`
+- Pass only needed primitive values, not entire objects
+- Clear parameter names that reveal purpose
+- Document exact properties if object passing is necessary
+
+### Testing
+- BDD methodology: GIVEN/WHEN/THEN structure
+- Descriptive test names reflecting scenarios
+- Use `actual` for test results, `expected` for assertions
+- One behavior per test, avoid deduplicating mock logic
+
+## TOOL PREFERENCES
+
+### Commands
+- Search: `rg` > grep, `fd` > find
+- Kill port: `killport <port>`
+- Package manager: Detect before use (npm/pnpm/yarn)
+
+### MCP Services
+- **context7**: Library/framework documentation
+- **github-mcp-server**: GitHub code search
+- **filesystem**: Use absolute paths
+
+## SPECIFIC WORKFLOWS
+
+### Anytype Notes
+**Triggers**: "save to note", "save to anytype", "save note"
+**Action**: Create page with `space_id: bafyreibmeyechdodo2ruztxlqjsd7zmqvrzcwh5oc7ybj6xr4ol35z4fum.1kpp1h2cp2ek2`, add to `list_id: bafyreihgbvc5clgh5vlsmdtm6nfmet53j73blogtlgljt2s4xdoxptxriu`
+**Format**: Clear title, organized headings, bulleted key points, code blocks
+**Behavior**: Execute immediately, no confirmation needed
+
+### Terminal Analysis
+**Trigger**: "terminal/term output analysis", "keynote creation"
+**Steps**:
+1. Read `~/workspace/term-buffer.txt`
+2. Identify main topic and track error→solution progression
+3. Create keynote with: objective, challenges, solution, insights
+4.
Save to `~/workspace/terminal-keynote.md` (or Anytype if requested) + +## WORKFLOW PREFERENCES + +- **Focus**: Only modify code relevant to the task +- **Architecture**: Avoid major pattern changes unless instructed + +## ERROR HANDLING & VALIDATION + +- Explicit error propagation > silent failure +- Validate behavior preservation during refactoring +- Update docs/tests for significant changes +- Ask for help when details needed for decisions diff --git a/conf/llm/goose/config.yaml b/conf/llm/goose/config.yaml index 45725878..acc89cb1 100644 --- a/conf/llm/goose/config.yaml +++ b/conf/llm/goose/config.yaml @@ -1,6 +1,6 @@ GOOSE_PLANNER_PROVIDER: openrouter -GOOSE_PLANNER_MODEL: anthropic/claude-sonnet-4:floor -GOOSE_LEAD_MODEL: anthropic/claude-3.7-sonnet:floor +GOOSE_PLANNER_MODEL: anthropic/claude-sonnet-4 +GOOSE_LEAD_MODEL: anthropic/claude-3.7-sonnet GOOSE_MODE: auto GOOSE_PROVIDER: openrouter # GOOSE_MODEL: google/gemini-2.5-pro-preview-06-05 @@ -57,16 +57,18 @@ extensions: name: context7 timeout: 300 type: stdio - notion: + anytype: type: stdio - display_name: Notion - name: notion + display_name: Anytype + name: anytype timeout: 900 enabled: true cmd: bunx args: - - "mcp-remote" - - "https://mcp.notion.com/sse" + - "@anyproto/anytype-mcp@latest" + env_keys: ["OPENAPI_MCP_HEADERS"] + envs: + OPENAPI_MCP_HEADERS: "{\"Authorization\":\"Bearer @ANYTYPE_API_KEY@\", \"Anytype-Version\":\"2025-05-20\"}" playwright: args: - "@executeautomation/playwright-mcp-server" diff --git a/flake.lock b/flake.lock index 3532df9d..1f48a54e 100644 --- a/flake.lock +++ b/flake.lock @@ -464,10 +464,10 @@ "rust-overlay": "rust-overlay" }, "locked": { - "lastModified": 1750638421, - "narHash": "sha256-AvSzcirthitS9k4bv31HUMa8mGZLJd1M3bNgXV48YeE=", + "lastModified": 1750726885, + "narHash": "sha256-mKiRhaaL8g2p6sO/56rS0xDvXLKsGX6gHCXncAOmTiY=", "ref": "refs/heads/main", - "rev": "bc83eb0a4837bdec89792967dd28674141f4a364", + "rev": "6323764f6916ae404cbeef8f3d2cdbb7af0b981a", "shallow": true, "type": "git", "url": "ssh://git@github.com/pze/jj.git" @@ -531,10 +531,10 @@ ] }, "locked": { - "lastModified": 1750136408, - "narHash": "sha256-bVm8iGIqR3eSMr5yR64nwAY0C7lvvRhpZuFgxN7HuL4=", + "lastModified": 1750867471, + "narHash": "sha256-kg4hpSgcku69Llej+Q+K7SZA7BPQrfVuBmJvTqlCN6M=", "ref": "refs/heads/main", - "rev": "7d8c0282073b9630360f9fc936de6de5d233259f", + "rev": "7e01ef4794d41b87fd4e2a61c6deb90fcb331011", "shallow": true, "type": "git", "url": "ssh://git@github.com/towry/nix-priv.git" diff --git a/nix/hm/ai.nix b/nix/hm/ai.nix index 3c9ab15b..44486ad4 100644 --- a/nix/hm/ai.nix +++ b/nix/hm/ai.nix @@ -54,6 +54,7 @@ in source = pkgs.replaceVars ../../conf/llm/goose/config.yaml { GITHUB_PERSONAL_ACCESS_TOKEN = pkgs.nix-priv.keys.github.accessToken; BRAVE_API_KEY = pkgs.nix-priv.keys.braveSearch.apiKey; + ANYTYPE_API_KEY = pkgs.nix-priv.keys.anytype.apiKey; }; }; "goose/.goosehints" = { From 69aa8ca3b469a4c0501a1f2653300f4b1330aee9 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Thu, 26 Jun 2025 01:13:19 +0800 Subject: [PATCH 09/15] fix: update Claude Sonnet model to 3.7 Review notes: - The model name was updated from `claude-sonnet-4` to `claude-3.7-sonnet`. This is a minor version bump and should be compatible. - Ensure that the new model version `claude-3.7-sonnet` is indeed the correct and intended model to use, as `claude-sonnet-4` might refer to a different, potentially newer, internal alias or a future model. - Verify that this change does not negatively impact the prompt's performance or output quality. 
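- A quick way to confirm the new slug is valid (a sketch; assumes OpenRouter's
  public `GET /api/v1/models` endpoint and `jq` being available locally):

      curl -s https://openrouter.ai/api/v1/models \
        | jq -r '.data[].id' | grep -i 'claude-3.7-sonnet'

  An empty result would mean the model id is wrong for this provider. Note that
  the `:floor` suffix in the aichat role is a provider-routing shortcut, not
  part of the model id itself.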
---
 conf/llm/aichat/roles/gen-prompt.md | 2 +-
 conf/llm/goose/config.yaml          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/conf/llm/aichat/roles/gen-prompt.md b/conf/llm/aichat/roles/gen-prompt.md
index 32f433c3..c92f5bf6 100644
--- a/conf/llm/aichat/roles/gen-prompt.md
+++ b/conf/llm/aichat/roles/gen-prompt.md
@@ -1,5 +1,5 @@
 ---
-model: openrouter:anthropic/claude-sonnet-4:floor
+model: openrouter:anthropic/claude-3.7-sonnet:floor
 temperature: 0.1
 top_p: 0.2
 ---
diff --git a/conf/llm/goose/config.yaml b/conf/llm/goose/config.yaml
index acc89cb1..f2c927c6 100644
--- a/conf/llm/goose/config.yaml
+++ b/conf/llm/goose/config.yaml
@@ -34,7 +34,7 @@ extensions:
   tutorial:
     bundled: true
     display_name: Tutorial
-    enabled: true
+    enabled: false
     name: tutorial
     timeout: 500
     type: builtin

From 7c0c7c2d50a71ffc7a2b65904fedc23df84f263c Mon Sep 17 00:00:00 2001
From: Towry Wang
Date: Thu, 26 Jun 2025 16:26:49 +0800
Subject: [PATCH 10/15] fix: remove temporary prompt file only when aborted

Review notes:
- The previous logic removed the temporary file unconditionally, so the original prompt was discarded even after a successful generation.
- The new approach keeps the file when a prompt was generated (its path is echoed as "Original prompt saved") and removes it only when the editor exits without content.
- The temporary file is now stored in a dedicated directory with a timestamped name, which makes earlier prompts easy to find if issues arise.
- Consider using `mktemp -d` to create a temporary directory and then a file inside it, which might be cleaner for managing temporary resources.
---
 conf/llm/docs/coding-rules.md | 2 +-
 nix/hm/ai.nix                 | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/conf/llm/docs/coding-rules.md b/conf/llm/docs/coding-rules.md
index e219240d..d4c9ed4b 100644
--- a/conf/llm/docs/coding-rules.md
+++ b/conf/llm/docs/coding-rules.md
@@ -49,7 +49,7 @@
 
 ### Commands
 - Search: `rg` > grep, `fd` > find
-- Kill port: `killport <port>`
+- Kill port: `killport <port>` when you need to free a port
 - Package manager: Detect before use (npm/pnpm/yarn)
 
 ### MCP Services
diff --git a/nix/hm/ai.nix b/nix/hm/ai.nix
index 44486ad4..f80c5b9c 100644
--- a/nix/hm/ai.nix
+++ b/nix/hm/ai.nix
@@ -25,18 +25,20 @@ in
     };
     functions = {
       gen-task-prompt = ''
-        set tmpfile (mktemp)
+        mkdir -p /tmp/llm-task-prompt
+        set timestamp (date +%Y-%m-%d-%H-%M-%S)
+        set tmpfile "/tmp/llm-task-prompt/$timestamp.md"
         $EDITOR $tmpfile
         if test -s $tmpfile
           mkdir -p llm/task-plan-prompts
-          set timestamp (date +%Y%m%d_%H%M%S)
           set output_file "llm/task-plan-prompts/task_plan_$timestamp.md"
           cat $tmpfile | aichat --role gen-prompt > $output_file
           echo "Task plan generated: $output_file"
+          echo "Original prompt saved: $tmpfile"
         else
           echo "No content provided, aborting."
+ rm -f $tmpfile end - rm $tmpfile ''; }; }; From e3e9dedadd7e338a7ed0c94a3d1dd08b1a6d39c8 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Fri, 27 Jun 2025 09:11:01 +0800 Subject: [PATCH 11/15] feat(fzf): add alt-j keybinding for jump command --- nix/hm/fzf.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/hm/fzf.nix b/nix/hm/fzf.nix index 3f4085e5..930cc357 100644 --- a/nix/hm/fzf.nix +++ b/nix/hm/fzf.nix @@ -44,6 +44,7 @@ in "--preview-window='sharp,right,border-left,<70(bottom,50%,border-top)'" # this keybind should match the telescope ones in nvim config ''--bind="${lib.concatStringsSep "," fzf-key-bindings}"'' + "--bind=alt-j:jump" ]; fileWidgetCommand = "${pkgs.ripgrep}/bin/rg --files"; fileWidgetOptions = [ From 3ed4ec71c08b1ebd5b37ea456e73cebb659b1431 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Fri, 27 Jun 2025 12:47:04 +0800 Subject: [PATCH 12/15] feat: add task plan review prompt and script Add a detailed prompt for LLM-based task plan reviews, including review process, report format, and guidelines. Introduce a new `goose-review-plan` script to facilitate the review process. Review notes: - The prompt provides a comprehensive structure for task plan reviews, which should improve consistency and quality of LLM outputs. - The `goose-review-plan` script integrates well with the existing `goose` CLI, making the new functionality easily accessible. - Ensure the LLM is fine-tuned or capable enough to follow the detailed instructions and generate reports in the specified format. - Consider adding examples of good and bad task plans to the prompt for better context. - The script currently checks for argument count; ensure robust error handling for file existence and readability. --- conf/llm/docs/prompts/task-plan-review.md | 211 ++++++++++++++++++++++ nix/hm/ai.nix | 46 +++++ 2 files changed, 257 insertions(+) create mode 100644 conf/llm/docs/prompts/task-plan-review.md diff --git a/conf/llm/docs/prompts/task-plan-review.md b/conf/llm/docs/prompts/task-plan-review.md new file mode 100644 index 00000000..2b946f5b --- /dev/null +++ b/conf/llm/docs/prompts/task-plan-review.md @@ -0,0 +1,211 @@ +You are a technical reviewer tasked with analyzing task plan documents. Your role is to thoroughly review the provided task plan and generate a comprehensive review report. + +## Core Principle: Verify, Don't Speculate + +**GOLDEN RULE**: Only report issues you have personally verified with evidence. If you haven't checked it yourself, don't report it. + +This means: +- ✅ "I checked the docs at [URL] and found X contradicts the plan's claim of Y" +- ❌ "The docs probably don't mention this" +- ❌ "This module might not exist" +- ❌ "The documentation should include..." + +## Context Awareness + +Before reviewing, understand: +- **What is being built**: Development tool, production system, library, etc. +- **The scope**: Integration, new development, migration, etc. +- **The purpose**: Who will use it and how +- **The environment**: Development, staging, production, or all + +Only raise issues that are RELEVANT to the actual context. Don't suggest production hardening for development tools, enterprise scaling for prototypes, or complex architectures for simple scripts. + +## Review Process + +When reviewing a task plan, follow these steps: + +### 1. 
Assertion Verification +- Examine each assertion or claim made in the task plan +- **ONLY report assertions that you have ACTUALLY VERIFIED to be incorrect** +- To verify an assertion: + - You MUST access and read the actual documentation/source + - You MUST find the specific information that contradicts the assertion + - You MUST quote the exact text from the source that proves the assertion wrong + - **You MUST document HOW you accessed the source**: + - For documentation: Provide the exact URL you visited + - For code: Show the file path and how you searched/found it + - For APIs: Show the endpoint you queried + - For tools: Show the command you ran (e.g., `man fzf`, `npm info package`) + - **For MCP tools**: List the exact MCP service and function used (e.g., `Used mcp_context7_get-library-docs with libraryID '/vercel/next.js'`) +- **DO NOT**: + - Make up version numbers or information + - Claim something is in documentation without actually finding it + - Report "unverifiable" assertions - if you can't verify it, don't report it + - Suggest checking sources - YOU must do the checking + - Reference documentation you haven't personally accessed + - Use cached or remembered information - always verify fresh + - **Claim documentation is missing something unless you've read the ENTIRE relevant documentation** + - **Report that documentation "should" contain something it doesn't - focus on what the plan claims vs what actually exists** + +### 2. Technical Solution Analysis +- Evaluate the proposed technical approach **IN CONTEXT** +- Identify CONCRETE issues with EVIDENCE that are RELEVANT to the use case: + - Documented incompatibilities that will affect the specific implementation + - Known limitations that matter for the intended usage + - Missing steps that will cause actual failure in the target environment +- **DO NOT raise**: + - Production concerns for development tools + - Enterprise features for simple integrations + - Theoretical edge cases that don't apply to the use case + - Generic "best practices" without specific relevance + +### 3. Implementation Feasibility +- Identify missing steps based on documented requirements +- Point out prerequisites explicitly stated in official docs +- Flag unrealistic timelines only with evidence +- Focus on what will actually block or break the implementation + +## Review Report Format + +Generate your review report using this clear, flat structure: + +``` +## Task Plan Review Report + +### Summary +[Brief overview of the task plan and overall assessment] + +### Context Understanding +- **Task Type**: [What is being built/integrated] +- **Target Environment**: [Development/Production/Both] +- **Primary Goal**: [What success looks like] + +### Verified Assertion Errors +[ONLY include if you found actual errors with proof] +- ✗ **[Incorrect assertion from plan]**: + - **What the plan claims**: [Exact quote from plan] + - **Actual fact**: [What you verified to be true] + - **How I verified**: [Exact steps: "I ran `npm info react` and checked the versions field" or "I visited https://react.dev/reference/react and found..." 
or "I used MCP tool `mcp_context7_get-library-docs` with libraryID '/facebook/react'"] + - **Proof**: "[Exact quote from documentation]" - [Source link, section/page] + +### Practical Technical Issues +[ONLY issues that will actually affect the implementation] +- **Issue #1**: [Specific problem that will occur] + - **Why it matters here**: [How it affects THIS specific use case] + - **Evidence**: "[Quote from documentation]" - [Source link] + - **Practical fix**: [Simple solution appropriate to the context] + +### Missing Critical Steps +[Only steps that will cause actual failure if omitted] +- **Missing**: [Required step from documentation] + - **Source**: "[Quote requiring this step]" - [Documentation link] + - **What breaks without it**: [Specific failure in this context] + +### Recommendations +[Only practical, context-appropriate suggestions] +1. [Specific action relevant to the task] - Based on: [Documentation reference] +2. [Another relevant action] - Based on: [Documentation reference] + +### Conclusion +[Final assessment focused on whether the plan will achieve its stated goals] +``` + +## Critical Rules + +1. **Context-Appropriate Review**: + - Development tool integrations don't need production failover + - Simple integrations don't need enterprise architecture + - Prototypes don't need infinite scalability + - Match your concerns to the actual use case + +2. **No Fabrication - Show Your Work**: + - NEVER claim something exists in documentation without finding it + - NEVER invent version numbers, features, or requirements + - If you haven't read it, don't reference it + - **ALWAYS document HOW you verified information**: + - "I checked the official docs at [URL]..." + - "I ran the command `[command]` and got..." + - "I searched the codebase using `[search command]` and found..." + - "I used MCP tool `[tool_name]` with parameters `[params]`..." + - If you can't access a source, say so - don't pretend you did + +3. **Evidence Required**: + - Every assertion error must include an exact quote proving it wrong + - Every technical issue must reference actual documentation + - No speculation or "best practices" without sources + +4. **Practical Focus**: + - Will this actually break the implementation? + - Does this matter for the stated use case? + - Is the fix proportional to the problem? + +## Examples + +### Good Assertion Verification: +✅ **RIGHT**: +- ✗ **"React 19 requires Node.js 14+"**: + - **What the plan claims**: React 19 requires Node.js 14+ + - **Actual fact**: React 19 requires Node.js 18.17.0+ + - **How I verified**: I visited https://react.dev/blog/2024/04/25/react-19#requirements and searched for "Node.js" + - **Proof**: "React 19 requires Node.js 18.17.0 or later" - https://react.dev/blog/2024/04/25/react-19#requirements + +✅ **RIGHT** (using MCP tools): +- ✗ **"Next.js 14 supports React 16+"**: + - **What the plan claims**: Next.js 14 supports React 16+ + - **Actual fact**: Next.js 14 requires React 18.2.0 or higher + - **How I verified**: I used MCP tool `mcp_context7_get-library-docs` with libraryID '/vercel/next.js/v14.0.0' and searched for React version requirements + - **Proof**: "Next.js 14 requires react@^18.2.0 and react-dom@^18.2.0" - Next.js 14 documentation + +### Bad Assertion Verification: +❌ **WRONG**: "The documentation says React 19 needs Node 18" (no proof of access) +❌ **WRONG**: "Based on my knowledge, React 19 requires..." 
(using memory, not verification) +❌ **WRONG**: "React 19 probably needs Node 18 or higher" (speculation) +❌ **WRONG**: "The documentation should clarify version compatibility" (imposing requirements on docs) +❌ **WRONG**: "The plan references installation guide without verifying steps" (vague, no specific error found) +❌ **WRONG**: "The documentation doesn't mention X module" (without showing you searched for it) + +### More Bad Examples to Avoid: +❌ **WRONG** (Claiming missing documentation): +- ✗ **"The plan uses LiveVue.SSR.NodeJS which isn't documented"**: + - **Why it's wrong**: Made claim without actually searching for the module documentation + - **What you should do**: Search for "LiveVue.SSR.NodeJS" in docs, check the API reference, use web search + +❌ **WRONG** (Imposing documentation requirements): +- ✗ **"The installation guide should have version compatibility matrix"**: + - **Why it's wrong**: This is imposing your opinion on what docs "should" have, not verifying what the plan claims + - **What you should do**: Only report if the plan makes a false claim about what's in the docs + +❌ **WRONG** (Unverified module existence): +- ✗ **"The plan proposes using modules that don't exist"**: + - **Why it's wrong**: Claimed non-existence without showing verification attempts + - **What you should do**: Search documentation, check source code, use package manager to verify + +❌ **WRONG**: "Add Node.js failover for SSR" (for a dev tool integration) +✅ **RIGHT**: "The webpack config conflicts with Vite - see [doc link]" + +❌ **WRONG**: "Consider microservices architecture" (for a simple library integration) +✅ **RIGHT**: "Missing required peer dependency 'vue@^3.0.0'" + +❌ **WRONG**: "Implement comprehensive monitoring" (for a development environment) +✅ **RIGHT**: [Skip it - not relevant to the context] + +### Context-Appropriate Technical Issues: +✅ **RIGHT** (Real compatibility issue with proof): +- **Issue**: "The plan uses React 18 hooks with React 16" + - **Why it matters**: Will cause runtime errors + - **Evidence**: "useId() was introduced in React 18" - https://react.dev/reference/react/useId + - **Practical fix**: Update to React 18 or use alternative approach + +❌ **WRONG** (False positive about configuration): +- **Issue**: "Plan doesn't address Tailwind conflicts with Vite" + - **Why it's wrong**: The plan DOES include Tailwind configuration in Step 2.3 + - **What you missed**: You didn't read the full plan before reporting + +❌ **WRONG** (Imposing additional requirements): +- **Issue**: "Plan should include TypeScript configuration" + - **Why it's wrong**: Unless the plan claims to support TypeScript but doesn't configure it, this isn't an error + - **Remember**: Report what's wrong, not what could be added + +## Final Reminder + +Focus on what will actually help or hinder the specific task at hand. Quality over quantity - one relevant, actionable issue is better than ten theoretical concerns that don't apply to the context. 
diff --git a/nix/hm/ai.nix b/nix/hm/ai.nix
index f80c5b9c..786e7ad2 100644
--- a/nix/hm/ai.nix
+++ b/nix/hm/ai.nix
@@ -12,6 +12,9 @@ let
   else
     "${config.home.homeDirectory}/.config/aichat";
 
+  # Read and escape the system prompt for shell usage
+  taskPlanReviewPrompt = lib.escapeShellArg (builtins.readFile ../../conf/llm/docs/prompts/task-plan-review.md);
+
 in
 {
   home.packages = with pkgs; [
@@ -40,6 +43,49 @@ in
           rm -f $tmpfile
         end
       '';
+      goose-review-plan = {
+        description = "Review task plan";
+
+        body = ''
+          # Parse arguments
+          set -l interactive_mode ""
+          set -l task_plan ""
+
+          # Process arguments
+          set -l i 1
+          while test $i -le (count $argv)
+            switch $argv[$i]
+              case -i
+                set interactive_mode "--interactive"
+              case '*'
+                if test -z "$task_plan"
+                  set task_plan $argv[$i]
+                end
+            end
+            set i (math $i + 1)
+          end
+
+          # Check if task plan was provided
+          if test -z "$task_plan"
+            echo "Usage: goose-review-plan [-i] <task-plan-file>"
+            echo "  -i  Enable interactive mode after review"
+            return 1
+          end
+
+          if not test -f $task_plan
+            echo "Error: Task plan file '$task_plan' not found"
+            return 1
+          end
+
+          echo "Reviewing task plan: $task_plan"
+          if test -n "$interactive_mode"
+            echo "Interactive mode enabled"
+          end
+          echo "Using goose to analyze..."
+
+          goose run $interactive_mode --max-tool-repetitions 50 --system ${taskPlanReviewPrompt} --text "Please review the task plan: $task_plan"
+        '';
+      };
     };
   };

From 38810682dbdc7ca647ecd69b518f7771a62a1762 Mon Sep 17 00:00:00 2001
From: Towry Wang
Date: Fri, 27 Jun 2025 14:20:26 +0800
Subject: [PATCH 13/15] refactor(ai): improve goose command construction

Review notes:
- The change correctly addresses the issue of an empty `$interactive_mode` being expanded into a spurious empty-string argument to `goose`.
- Building an argument list with `set -l` and expanding it is the idiomatic fish pattern for dynamic commands; unlike `eval`-based construction, it avoids quoting pitfalls.
- The previous single-line command was brittle: when `$interactive_mode` was empty, fish passed an empty string as an argument to `goose`. The new approach correctly omits the `--interactive` flag in that case.
- One nit: the `goose_cmd` list is built but never used; execution goes through `goose_args`, so the `goose_cmd` block is dead code and could be dropped.
---
 conf/llm/docs/prompts/task-plan-review.md | 42 +++++-----------
 nix/hm/ai.nix                             | 59 ++++++++++++++++++++---
 nix/hm/tmux.nix                           |  4 +-
 3 files changed, 68 insertions(+), 37 deletions(-)

diff --git a/conf/llm/docs/prompts/task-plan-review.md b/conf/llm/docs/prompts/task-plan-review.md
index 2b946f5b..2dd97b54 100644
--- a/conf/llm/docs/prompts/task-plan-review.md
+++ b/conf/llm/docs/prompts/task-plan-review.md
@@ -27,6 +27,14 @@ When reviewing a task plan, follow these steps:
 
 ### 1.
Assertion Verification - Examine each assertion or claim made in the task plan - **ONLY report assertions that you have ACTUALLY VERIFIED to be incorrect** +- **ONLY report assertions that the task plan explicitly makes with exact quotes** +- **An assertion error means the plan claims something FALSE or CONTRADICTORY** +- **You MUST quote the exact text from the task plan** - no paraphrasing, interpretation, or summarizing +- **An assertion error is NOT**: + - A requirement that is satisfied (e.g., "needs Node 18+" when Node 22 is installed) + - A missing feature you think should be added + - A suggestion for improvement + - Something that works but could be done differently - To verify an assertion: - You MUST access and read the actual documentation/source - You MUST find the specific information that contradicts the assertion @@ -46,6 +54,7 @@ When reviewing a task plan, follow these steps: - Use cached or remembered information - always verify fresh - **Claim documentation is missing something unless you've read the ENTIRE relevant documentation** - **Report that documentation "should" contain something it doesn't - focus on what the plan claims vs what actually exists** + - **Report satisfied requirements as errors (e.g., "needs Node 18+" is NOT wrong when Node 22 is installed)** ### 2. Technical Solution Analysis - Evaluate the proposed technical approach **IN CONTEXT** @@ -83,7 +92,7 @@ Generate your review report using this clear, flat structure: ### Verified Assertion Errors [ONLY include if you found actual errors with proof] - ✗ **[Incorrect assertion from plan]**: - - **What the plan claims**: [Exact quote from plan] + - **What the plan claims**: "[EXACT QUOTE from the task plan - must be verbatim text, not interpretation]" - **Actual fact**: [What you verified to be true] - **How I verified**: [Exact steps: "I ran `npm info react` and checked the versions field" or "I visited https://react.dev/reference/react and found..." 
or "I used MCP tool `mcp_context7_get-library-docs` with libraryID '/facebook/react'"] - **Proof**: "[Exact quote from documentation]" - [Source link, section/page] @@ -144,14 +153,14 @@ Generate your review report using this clear, flat structure: ### Good Assertion Verification: ✅ **RIGHT**: - ✗ **"React 19 requires Node.js 14+"**: - - **What the plan claims**: React 19 requires Node.js 14+ + - **What the plan claims**: "React 19 requires Node.js 14+" - **Actual fact**: React 19 requires Node.js 18.17.0+ - **How I verified**: I visited https://react.dev/blog/2024/04/25/react-19#requirements and searched for "Node.js" - **Proof**: "React 19 requires Node.js 18.17.0 or later" - https://react.dev/blog/2024/04/25/react-19#requirements ✅ **RIGHT** (using MCP tools): - ✗ **"Next.js 14 supports React 16+"**: - - **What the plan claims**: Next.js 14 supports React 16+ + - **What the plan claims**: "Next.js 14 supports React 16+" - **Actual fact**: Next.js 14 requires React 18.2.0 or higher - **How I verified**: I used MCP tool `mcp_context7_get-library-docs` with libraryID '/vercel/next.js/v14.0.0' and searched for React version requirements - **Proof**: "Next.js 14 requires react@^18.2.0 and react-dom@^18.2.0" - Next.js 14 documentation @@ -163,22 +172,7 @@ Generate your review report using this clear, flat structure: ❌ **WRONG**: "The documentation should clarify version compatibility" (imposing requirements on docs) ❌ **WRONG**: "The plan references installation guide without verifying steps" (vague, no specific error found) ❌ **WRONG**: "The documentation doesn't mention X module" (without showing you searched for it) - -### More Bad Examples to Avoid: -❌ **WRONG** (Claiming missing documentation): -- ✗ **"The plan uses LiveVue.SSR.NodeJS which isn't documented"**: - - **Why it's wrong**: Made claim without actually searching for the module documentation - - **What you should do**: Search for "LiveVue.SSR.NodeJS" in docs, check the API reference, use web search - -❌ **WRONG** (Imposing documentation requirements): -- ✗ **"The installation guide should have version compatibility matrix"**: - - **Why it's wrong**: This is imposing your opinion on what docs "should" have, not verifying what the plan claims - - **What you should do**: Only report if the plan makes a false claim about what's in the docs - -❌ **WRONG** (Unverified module existence): -- ✗ **"The plan proposes using modules that don't exist"**: - - **Why it's wrong**: Claimed non-existence without showing verification attempts - - **What you should do**: Search documentation, check source code, use package manager to verify +❌ **WRONG**: Paraphrasing the plan - e.g., claiming plan says "The template will be clean" when it actually says "Remove all Vuex imports from component template" ❌ **WRONG**: "Add Node.js failover for SSR" (for a dev tool integration) ✅ **RIGHT**: "The webpack config conflicts with Vite - see [doc link]" @@ -196,16 +190,6 @@ Generate your review report using this clear, flat structure: - **Evidence**: "useId() was introduced in React 18" - https://react.dev/reference/react/useId - **Practical fix**: Update to React 18 or use alternative approach -❌ **WRONG** (False positive about configuration): -- **Issue**: "Plan doesn't address Tailwind conflicts with Vite" - - **Why it's wrong**: The plan DOES include Tailwind configuration in Step 2.3 - - **What you missed**: You didn't read the full plan before reporting - -❌ **WRONG** (Imposing additional requirements): -- **Issue**: "Plan should include TypeScript 
configuration" - - **Why it's wrong**: Unless the plan claims to support TypeScript but doesn't configure it, this isn't an error - - **Remember**: Report what's wrong, not what could be added - ## Final Reminder Focus on what will actually help or hinder the specific task at hand. Quality over quantity - one relevant, actionable issue is better than ten theoretical concerns that don't apply to the context. diff --git a/nix/hm/ai.nix b/nix/hm/ai.nix index 786e7ad2..295d84cb 100644 --- a/nix/hm/ai.nix +++ b/nix/hm/ai.nix @@ -12,9 +12,6 @@ let else "${config.home.homeDirectory}/.config/aichat"; - # Read and escape the system prompt for shell usage - taskPlanReviewPrompt = lib.escapeShellArg (builtins.readFile ../../conf/llm/docs/prompts/task-plan-review.md); - in { home.packages = with pkgs; [ @@ -49,6 +46,7 @@ in body = '' # Parse arguments set -l interactive_mode "" + set -l save_mode "" set -l task_plan "" # Process arguments @@ -57,6 +55,8 @@ in switch $argv[$i] case -i set interactive_mode "--interactive" + case --save + set save_mode "1" case '*' if test -z "$task_plan" set task_plan $argv[$i] @@ -67,8 +67,9 @@ in # Check if task plan was provided if test -z "$task_plan" - echo "Usage: goose-review-plan [-i] " - echo " -i Enable interactive mode after review" + echo "Usage: goose-review-plan [-i] [--save] " + echo " -i Enable interactive mode after review" + echo " --save Save the review report to task plan directory" return 1 end @@ -81,9 +82,52 @@ in if test -n "$interactive_mode" echo "Interactive mode enabled" end + if test -n "$save_mode" + echo "Save mode enabled - review report will be saved" + end echo "Using goose to analyze..." - goose run $interactive_mode --max-tool-repetitions 50 --system ${taskPlanReviewPrompt} --text "Please review the task plan: $task_plan" + # Build the command + set -l goose_cmd "goose run" + if test -n "$interactive_mode" + set goose_cmd $goose_cmd "--interactive" + end + + # Read the system prompt from the config file + set -l prompt_file "${config.xdg.configHome}/goose/task-plan-review-prompt.md" + if not test -f $prompt_file + echo "Error: Task plan review prompt not found at $prompt_file" + return 1 + end + + # Read the prompt content + set -l system_prompt (cat $prompt_file) + + # Build the text instruction + set -l text_instruction "Please review the task plan in: $task_plan \n +Read the task plan file directly without checking its existence first." 
+ if test -n "$save_mode" + # Generate the review report filename + set -l task_plan_dir (dirname $task_plan) + set -l task_plan_basename (basename $task_plan) + set -l task_plan_name (string replace -r "\\.[^.]*\$" "" $task_plan_basename) + set -l review_report_path "$task_plan_dir/$task_plan_name-review-report.md" + set text_instruction "$text_instruction + +After completing the review, save the review report to: $review_report_path" + end + + # Build the full command arguments + set -l goose_args + if test -n "$interactive_mode" + set goose_args $goose_args --interactive + end + set goose_args $goose_args --max-tool-repetitions 50 + set goose_args $goose_args --system "$system_prompt" + set goose_args $goose_args --text "$text_instruction" + + # Execute the command + goose run $goose_args ''; }; }; @@ -108,6 +152,9 @@ in "goose/.goosehints" = { source = ../../conf/llm/docs/coding-rules.md; }; + "goose/task-plan-review-prompt.md" = { + source = ../../conf/llm/docs/prompts/task-plan-review.md; + }; }; home.file = { diff --git a/nix/hm/tmux.nix b/nix/hm/tmux.nix index fbbea2aa..82fe941c 100644 --- a/nix/hm/tmux.nix +++ b/nix/hm/tmux.nix @@ -101,7 +101,7 @@ in { plugin = pkgs.tmuxPlugins.yank; extraConfig = '' - set -g @plugin 'tmux-plugins/tmux-yank' + set -g @plugin 'tmux-plugins/tmux-yank' ''; } ]; @@ -168,7 +168,7 @@ in ## rerun a pane bind @ command-prompt -p 'respawn a pane(I):' 'respawn-pane -k -t %%' ## save current history to a buffer to ${config.home.homeDirectory}/workspace/term-buffer.txt - bind C-b command-prompt -p 'save history to filename:' -I '${config.home.homeDirectory}/local-tmux.history' 'capture-pane -S - ; save-buffer %1 ; delete-buffer' + bind C-b command-prompt -p 'save history to filename:' -I '${config.home.homeDirectory}/workspace/term-buffer.txt' 'capture-pane -S - ; save-buffer %1 ; delete-buffer' bind ? list-keys ## Split panes From de25278b5bda2c4080596c51aa2a30a5b5b3b0d2 Mon Sep 17 00:00:00 2001 From: Towry Wang Date: Fri, 27 Jun 2025 23:00:32 +0800 Subject: [PATCH 14/15] docs: add local server check to coding rules --- conf/llm/docs/coding-rules.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/conf/llm/docs/coding-rules.md b/conf/llm/docs/coding-rules.md index d4c9ed4b..74611572 100644 --- a/conf/llm/docs/coding-rules.md +++ b/conf/llm/docs/coding-rules.md @@ -4,6 +4,7 @@ ### Response Behavior - **Answer questions directly**: For instructional queries ("how to...", "what is...", "explain..."), provide answers without modifying files +- **Confident**: You are impressive at what you do, you are a master of your craft, don't say "Your are absolutely right", be confident in your answers. 
### Code Safety

 - Never break existing functionality without understanding impact
@@ -50,6 +51,7 @@

 ### Commands
 - Search: `rg` > grep, `fd` > find
 - Kill port: `killport <port>` when you need to free a port
+- Before starting a local server, run `curl -I http://localhost:<port>` to check if it's already running
 - Package manager: Detect before use (npm/pnpm/yarn)

 ### MCP Services
@@ -77,6 +79,7 @@

 - **Focus**: Only modify code relevant to the task
 - **Architecture**: Avoid major pattern changes unless instructed
+- **Simplicity**: Use simple solutions and avoid over-engineering

 ## ERROR HANDLING & VALIDATION

From a184f2e3552686f949a80323179a0d9d601e8f4b Mon Sep 17 00:00:00 2001
From: Towry Wang
Date: Sat, 28 Jun 2025 20:15:21 +0800
Subject: [PATCH 15/15] update flake

---
 flake.lock           | 90 ++++++++++++++++++++++----------------------
 nix/darwin/yabai.nix | 17 +++++----
 2 files changed, 55 insertions(+), 52 deletions(-)

diff --git a/flake.lock b/flake.lock
index 1f48a54e..7420add8 100644
--- a/flake.lock
+++ b/flake.lock
@@ -70,11 +70,11 @@
      },
      "crane_2": {
        "locked": {
-          "lastModified": 1743700120,
-          "narHash": "sha256-8BjG/P0xnuCyVOXlYRwdI1B8nVtyYLf3oDwPSimqREY=",
+          "lastModified": 1750266157,
+          "narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
          "owner": "ipetkov",
          "repo": "crane",
-          "rev": "e316f19ee058e6db50075115783be57ac549c389",
+          "rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
          "type": "github"
        },
        "original": {
@@ -90,11 +90,11 @@
        ]
      },
      "locked": {
-        "lastModified": 1748352827,
-        "narHash": "sha256-sNUUP6qxGkK9hXgJ+p362dtWLgnIWwOCmiq72LAWtYo=",
+        "lastModified": 1750618568,
+        "narHash": "sha256-w9EG5FOXrjXGfbqCcQg9x1lMnTwzNDW5BMXp8ddy15E=",
        "owner": "LnL7",
        "repo": "nix-darwin",
-        "rev": "44a7d0e687a87b73facfe94fba78d323a6686a90",
+        "rev": "1dd19f19e4b53a1fd2e8e738a08dd5fe635ec7e5",
        "type": "github"
      },
      "original": {
@@ -112,11 +112,11 @@
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
-        "lastModified": 1748414334,
-        "narHash": "sha256-pWLq78fWssxiRAvLQZnxKupUogR25u+28XS4lfxMMoE=",
+        "lastModified": 1751092526,
+        "narHash": "sha256-vmbu97JXqr9/sTWR5XRh646jkp8a0J9m0o6JIQTdjE4=",
        "owner": "nix-community",
        "repo": "fenix",
-        "rev": "1c050d9008ff9e934f8bb5298c902259ea2cb3f7",
+        "rev": "6643d56d9a78afa157b577862c220298c09b891d",
        "type": "github"
      },
      "original": {
@@ -134,11 +134,11 @@
        "rust-analyzer-src": "rust-analyzer-src_2"
      },
      "locked": {
-        "lastModified": 1746081462,
-        "narHash": "sha256-WmJBaktb33WwqNn5BwdJghAoiBnvnPhgHSBksTrF5K8=",
+        "lastModified": 1748759782,
+        "narHash": "sha256-MJNhEBsAbxRp/53qsXv6/eaWkGS8zMGX9LuCz1BLeck=",
        "owner": "nix-community",
        "repo": "fenix",
-        "rev": "e3be528e4f03538852ba49b413ec4ac843edeb60",
+        "rev": "9be40ad995bac282160ff374a47eed67c74f9c2a",
        "type": "github"
      },
      "original": {
@@ -442,11 +442,11 @@
        ]
      },
      "locked": {
-        "lastModified": 1748489961,
-        "narHash": "sha256-uGnudxMoQi2c8rpPoHXuQSm80NBqlOiNF4xdT3hhzLM=",
+        "lastModified": 1750973805,
+        "narHash": "sha256-BZXgag7I0rnL/HMHAsBz3tQrfKAibpY2vovexl2lS+Y=",
        "owner": "nix-community",
        "repo": "home-manager",
-        "rev": "95c988cf08e9a5a8fe7cc275d5e3f24e9e87bd51",
+        "rev": "080e8b48b0318b38143d5865de9334f46d51fce3",
        "type": "github"
      },
      "original": {
@@ -464,10 +464,10 @@
        "rust-overlay": "rust-overlay"
      },
      "locked": {
-        "lastModified": 1750726885,
-        "narHash": "sha256-mKiRhaaL8g2p6sO/56rS0xDvXLKsGX6gHCXncAOmTiY=",
+        "lastModified": 1751071214,
+        "narHash": "sha256-VNKQnPR/W/3GgRBPp9t8CqUzTyFmhr2QpgtEUzrJ2r0=",
        "ref": "refs/heads/main",
-        "rev": "6323764f6916ae404cbeef8f3d2cdbb7af0b981a",
+        "rev": "4d3fa2b5d42c2c09a8b4da2f14c6e720ceaac6cf",
"4d3fa2b5d42c2c09a8b4da2f14c6e720ceaac6cf", "shallow": true, "type": "git", "url": "ssh://git@github.com/pze/jj.git" @@ -547,11 +547,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1748344075, - "narHash": "sha256-PsZAY3H0e/PBoDVn4fLwGEmeSwESj7SZPZ6CMfgbWFU=", + "lastModified": 1750994206, + "narHash": "sha256-3u6rEbIX9CN/5A5/mc3u0wIO1geZ0EhjvPBXmRDHqWM=", "owner": "nixos", "repo": "nixpkgs", - "rev": "e0042dedfbc9134ef973f64e5c7f56a38cc5cc97", + "rev": "80d50fc87924c2a0d346372d242c27973cf8cdbf", "type": "github" }, "original": { @@ -563,11 +563,11 @@ }, "nixpkgs-edge": { "locked": { - "lastModified": 1748492705, - "narHash": "sha256-6wgBxOnLJqnji+KkgXVzPC3vHUn+QP+KqbgQ8x+GcPY=", + "lastModified": 1751112816, + "narHash": "sha256-23fuVLvI3O6majMJDc1IYCC3ZvI2xKljsMzbKnMdDWU=", "owner": "nixos", "repo": "nixpkgs", - "rev": "1ccd526bc55cb679956b64ccbd963d0fc552104b", + "rev": "04edc64dda77314675fea3db5634920fe5aebf7c", "type": "github" }, "original": { @@ -597,11 +597,11 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1748491482, - "narHash": "sha256-sYVOBTVQDXpVraS64yf99CqESgVxbh/OIF+EYcw1Pnw=", + "lastModified": 1751100707, + "narHash": "sha256-7iuLfMt3UBgvj2U6byEyFvZcg+L2sL0UIdw6GE5vEAM=", "owner": "nixos", "repo": "nixpkgs", - "rev": "63d0b8ab7df06ee41f414684b02e740297f9949c", + "rev": "04101305bd57da99f101adcc10c1da08e45d1dba", "type": "github" }, "original": { @@ -613,11 +613,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1743689281, - "narHash": "sha256-y7Hg5lwWhEOgflEHRfzSH96BOt26LaYfrYWzZ+VoVdg=", + "lastModified": 1750865895, + "narHash": "sha256-p2dWAQcLVzquy9LxYCZPwyUdugw78Qv3ChvnX755qHA=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2bfc080955153be0be56724be6fa5477b4eefabb", + "rev": "61c0f513911459945e2cb8bf333dc849f1b976ff", "type": "github" }, "original": { @@ -650,11 +650,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1748389118, - "narHash": "sha256-5QJCzMtA2lBFNGou2dbFrGgWXxfL2O92oJoUnqeoNjI=", + "lastModified": 1750871759, + "narHash": "sha256-hMNZXMtlhfjQdu1F4Fa/UFiMoXdZag4cider2R9a648=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "4f7af13637a77ce1dc21e58fcd3f635efbfb43a7", + "rev": "317542c1e4a3ec3467d21d1c25f6a43b80d83e7d", "type": "github" }, "original": { @@ -667,11 +667,11 @@ "rust-analyzer-src_2": { "flake": false, "locked": { - "lastModified": 1746024678, - "narHash": "sha256-Q5J7+RoTPH4zPeu0Ka7iSXtXty228zKjS0Ed4R+ohpA=", + "lastModified": 1748695646, + "narHash": "sha256-VwSuuRF4NvAoeHZJRRlX8zAFZ+nZyuiIvmVqBAX0Bcg=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "5d66d45005fef79751294419ab9a9fa304dfdf5c", + "rev": "2a388d1103450d814a84eda98efe89c01b158343", "type": "github" }, "original": { @@ -727,11 +727,11 @@ ] }, "locked": { - "lastModified": 1743682350, - "narHash": "sha256-S/MyKOFajCiBm5H5laoE59wB6w0NJ4wJG53iAPfYW3k=", + "lastModified": 1750905536, + "narHash": "sha256-Mo7yXM5IvMGNvJPiNkFsVT2UERmnvjsKgnY6UyDdySQ=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "c4a8327b0f25d1d81edecbb6105f74d7cf9d7382", + "rev": "2fa7c0aabd15fa0ccc1dc7e675a4fcf0272ad9a1", "type": "github" }, "original": { @@ -854,11 +854,11 @@ ] }, "locked": { - "lastModified": 1748478866, - "narHash": "sha256-RB/qJiZbwaTlSI1FcwFGhfOK9k6zY0uacdR3WvBwnmI=", + "lastModified": 1751026421, + "narHash": "sha256-4qtE+0ZJ1vXtKhwHWax4fGHofJUOXIhmrFn7g5o2ut8=", "owner": "mitchellh", "repo": "zig-overlay", - "rev": "6cec3b5519bd3fce4623ce0b570fa6f7c1d76584", + "rev": "87d49e7e2379be6640d7acb7a8bee23a60061ab3", "type": 
"github" }, "original": { @@ -898,11 +898,11 @@ "rust-overlay": "rust-overlay_2" }, "locked": { - "lastModified": 1745230073, - "narHash": "sha256-OER99U7MiqQ47myvbsiljsax7OsK19NMds4NBM9XXLs=", + "lastModified": 1750957292, + "narHash": "sha256-2CYTG+jxP5e7GHAj1t5aMsgb0Rom4jdOb3rsdLKpVNA=", "owner": "dj95", "repo": "zjstatus", - "rev": "a819e3bfe6bfef0438d811cdbb1bcfdc29912c62", + "rev": "abd848f23eff00d21ec09278072111d97dfd7fe6", "type": "github" }, "original": { diff --git a/nix/darwin/yabai.nix b/nix/darwin/yabai.nix index 916dfaa0..67b82f36 100644 --- a/nix/darwin/yabai.nix +++ b/nix/darwin/yabai.nix @@ -4,13 +4,16 @@ ... }: { - environment.etc."sudoers.d/yabai".source = pkgs.runCommand "sudoers-yabai" { } '' - YABAI_BIN="/etc/profiles/per-user/${username}/bin/yabai" - SHASUM=$(sha256sum "$YABAI_BIN" | cut -d' ' -f1) - cat <"$out" - ${username} ALL=(root) NOPASSWD: sha256:$SHASUM $YABAI_BIN --load-sa - EOF - ''; + environment.etc."sudoers.d/yabai" = { + source = pkgs.runCommand "sudoers-yabai" { } '' + YABAI_BIN="/etc/profiles/per-user/${username}/bin/yabai" + SHASUM=$(sha256sum "$YABAI_BIN" | cut -d' ' -f1) + cat <"$out" + ${username} ALL=(root) NOPASSWD: sha256:$SHASUM $YABAI_BIN --load-sa + EOF + ''; + enable = false; + }; # csrutil enable --without fs --without debug --without nvram # nvram boot-args=-arm64e_preview_abi }