diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml
deleted file mode 100644
index e885e2a9..00000000
--- a/.github/workflows/auto-release.yml
+++ /dev/null
@@ -1,154 +0,0 @@
-name: Auto Release
-
-# Automatically create git tag and GitHub release when package.json version changes on main branch
-# This prevents version/tag discrepancies and ensures every version bump gets a proper release
-#
-# Workflow: PR merged → Version changed → Create tag → Create release → Trigger npm publish
-# Duration: ~1-2 minutes
-#
-# Requirements:
-# - GITHUB_TOKEN is automatically available
-# - Requires write permissions for contents
-#
-# For manual release creation, see docs/RELEASE_PROCESS.md
-
-on:
- push:
- branches:
- - main
- paths:
- - 'package.json'
-
-permissions:
- contents: write # Required to create tags and releases
-
-jobs:
- check-version:
- runs-on: ubuntu-latest
- outputs:
- version-changed: ${{ steps.check.outputs.changed }}
- new-version: ${{ steps.check.outputs.version }}
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v6
- with:
- fetch-depth: 2 # Need previous commit to compare
-
- - name: Check if version changed
- id: check
- run: |
- # Get current version from package.json
- CURRENT_VERSION=$(node -p "require('./package.json').version")
- echo "Current version: $CURRENT_VERSION"
-
- # Get previous version from parent commit
- git checkout HEAD~1 package.json 2>/dev/null || echo "First commit"
- PREVIOUS_VERSION=$(node -p "require('./package.json').version" 2>/dev/null || echo "none")
- echo "Previous version: $PREVIOUS_VERSION"
-
- # Restore package.json
- git checkout HEAD package.json
-
- # Check if version changed
- if [ "$CURRENT_VERSION" != "$PREVIOUS_VERSION" ]; then
- echo "✅ Version changed: $PREVIOUS_VERSION → $CURRENT_VERSION"
- echo "changed=true" >> $GITHUB_OUTPUT
- echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
- else
- echo "⏭️ Version unchanged: $CURRENT_VERSION"
- echo "changed=false" >> $GITHUB_OUTPUT
- fi
-
- create-release:
- needs: check-version
- if: needs.check-version.outputs.version-changed == 'true'
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v6
-
- - name: Extract changelog for version
- id: changelog
- env:
- VERSION: ${{ needs.check-version.outputs.new-version }}
- run: |
- echo "📝 Extracting changelog for v$VERSION"
-
- # Extract changelog section for this version
- # This is a simple extraction - can be enhanced with a proper parser
- if [ -f "CHANGELOG.md" ]; then
- # Extract content between [VERSION] and next [VERSION] or end
- CHANGELOG=$(awk "/## \[$VERSION\]/,/## \[/" CHANGELOG.md | sed '$d' | tail -n +2)
-
- if [ -n "$CHANGELOG" ]; then
- echo "✅ Found changelog for v$VERSION"
- # Save to file for multi-line content
- echo "$CHANGELOG" > changelog.txt
- else
- echo "⚠️ No changelog found for v$VERSION, using default"
- echo "Release v$VERSION" > changelog.txt
- echo "" >> changelog.txt
- echo "See [CHANGELOG.md](CHANGELOG.md) for details." >> changelog.txt
- fi
- else
- echo "⚠️ CHANGELOG.md not found, using default release notes"
- echo "Release v$VERSION" > changelog.txt
- fi
-
- - name: Create git tag
- env:
- VERSION: ${{ needs.check-version.outputs.new-version }}
- run: |
- git config user.name "github-actions[bot]"
- git config user.email "github-actions[bot]@users.noreply.github.com"
-
- # Check if tag already exists (idempotent)
- if git ls-remote --tags origin | grep -q "refs/tags/v$VERSION$"; then
- echo "⏭️ Tag v$VERSION already exists, skipping tag creation"
- else
- # Create annotated tag with changelog
- git tag -a "v$VERSION" -F changelog.txt
- git push origin "v$VERSION"
- echo "✅ Created and pushed tag v$VERSION"
- fi
-
- - name: Create GitHub Release
- id: create_release
- uses: softprops/action-gh-release@v2
- with:
- tag_name: v${{ needs.check-version.outputs.new-version }}
- name: v${{ needs.check-version.outputs.new-version }}
- body_path: changelog.txt
- draft: false
- prerelease: false
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- continue-on-error: true
-
- - name: Verify release exists
- env:
- VERSION: ${{ needs.check-version.outputs.new-version }}
- REPO: ${{ github.repository }}
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- # Check if release exists (either created now or previously)
- if gh release view "v$VERSION" &>/dev/null; then
- echo "✅ Release v$VERSION exists"
- echo "📦 Tag: v$VERSION"
- echo "🔗 Release URL: https://github.com/$REPO/releases/tag/v$VERSION"
- else
- echo "❌ Release v$VERSION not found"
- echo "This should not happen - check previous steps"
- exit 1
- fi
-
- - name: Log success
- env:
- VERSION: ${{ needs.check-version.outputs.new-version }}
- REPO: ${{ github.repository }}
- run: |
- echo "🎉 Successfully ensured release v$VERSION exists"
- echo ""
- echo "⏭️ Next: The publish-npm.yml workflow will automatically publish to npm"
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
deleted file mode 100644
index 279a0c30..00000000
--- a/.github/workflows/e2e.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-name: E2E Tests
-
-on:
- push:
- branches: [main]
- pull_request:
- branches: [main]
- paths:
- - 'src/mcp/**'
- - 'tests/e2e/**'
- - 'tests/integration/**'
- workflow_dispatch:
- inputs:
- debug:
- description: 'Enable debug mode'
- required: false
- type: boolean
- default: false
-
-concurrency:
- group: e2e-${{ github.ref }}
- cancel-in-progress: true
-
-jobs:
- e2e:
- name: E2E Tests
- runs-on: ubuntu-latest
- timeout-minutes: 20
-
- steps:
- - name: Checkout
- uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: '20'
- cache: 'npm'
-
- - name: Install dependencies
- run: npm ci
-
- - name: Build
- run: npm run build
-
- # E2E tests run sequentially with no retries
- # - singleThread: true prevents resource exhaustion
- # - retry: 0 because failures are config issues, not flakes
- # - 60s timeout per test
- - name: Run E2E tests
- run: npm run test:e2e:safe
- env:
- NODE_OPTIONS: --max-old-space-size=4096
- CI: true
-
- - name: Run integration tests
- run: npm run test:integration -- --run
- env:
- NODE_OPTIONS: --max-old-space-size=4096
-
- - name: Cleanup processes
- if: always()
- run: |
- # Kill any orphaned node processes from tests
- pkill -f "server-bootstrap" || true
- pkill -f "vitest" || true
diff --git a/.github/workflows/installation-test.yml b/.github/workflows/installation-test.yml
deleted file mode 100644
index ebfdc5dd..00000000
--- a/.github/workflows/installation-test.yml
+++ /dev/null
@@ -1,164 +0,0 @@
-name: Installation Testing (All Methods)
-
-on:
- push:
- branches: [main, develop]
- paths:
- - 'package.json'
- - 'plugin.json'
- - 'mcp.json'
- - 'scripts/**'
- - 'src/**'
- - '.github/workflows/installation-test.yml'
- pull_request:
- branches: [main, develop]
- workflow_dispatch:
-
-jobs:
- # Job 1: 基礎檢查
- basic-checks:
- name: Basic Checks
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: '20'
- cache: 'npm'
-
- - name: Validate JSON Files
- run: |
- echo "Validating package.json..."
- node -e "require('./package.json')"
-
- echo "Validating plugin.json..."
- node -e "const p = require('./plugin.json'); if (p.mcpServers) throw new Error('plugin.json should not contain mcpServers');"
-
- echo "Validating mcp.json..."
- node -e "const m = require('./mcp.json'); if (!m.memesh || !m.memesh.command) throw new Error('Invalid mcp.json');"
-
- echo "✅ All JSON files are valid"
-
- - name: Check File Structure
- run: |
- test -f package.json || exit 1
- test -f plugin.json || exit 1
- test -f mcp.json || exit 1
- test -f README.md || exit 1
- test -d src || exit 1
- test -d scripts || exit 1
- echo "✅ File structure is correct"
-
- # Job 2: 方式 1 - npm 全域安裝測試
- npm-install-test:
- name: Method 1 - npm Global Install
- runs-on: ubuntu-latest
- needs: basic-checks
-
- strategy:
- matrix:
- node-version: ['20', '22']
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: ${{ matrix.node-version }}
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run tests
- run: npm test
-
- - name: Build project
- run: npm run build
-
- - name: Test npm pack
- run: |
- npm pack
- TARBALL=$(ls -t pcircle-memesh-*.tgz | head -1)
- tar -tzf "$TARBALL" | grep -q "package/mcp.json" || exit 1
- tar -tzf "$TARBALL" | grep -q "package/plugin.json" || exit 1
- echo "✅ npm pack successful"
-
- # Job 3: 方式 2 - Plugin Build 測試
- plugin-build-test:
- name: Method 2 - Plugin Build
- runs-on: ubuntu-latest
- needs: basic-checks
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: '20'
- cache: 'npm'
-
- - name: Install dependencies
- run: npm ci
-
- - name: Build project
- run: npm run build
-
- - name: Build plugin
- run: npm run build:plugin
-
- - name: Verify plugin structure
- run: |
- test -f .claude-plugin/memesh/.claude-plugin/plugin.json || exit 1
- test -f .claude-plugin/memesh/.mcp.json || exit 1
- test -f .claude-plugin/memesh/dist/mcp/server-bootstrap.js || exit 1
- test -d .claude-plugin/memesh/node_modules || exit 1
- echo "✅ Plugin structure verified"
-
- - name: Test MCP Server standalone
- run: |
- chmod +x ./scripts/test-mcp-server-standalone.sh
- ./scripts/test-mcp-server-standalone.sh
-
- # Job 4: Docker 測試
- docker-test:
- name: Method 3 - Docker Clean Install
- runs-on: ubuntu-latest
- needs: basic-checks
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Build test Docker image
- run: docker build -f Dockerfile.test -t memesh-test:latest .
-
- - name: Run tests in Docker
- run: docker run --rm memesh-test:latest
-
- # Job 5: 安全性檢查
- security-check:
- name: Security Checks
- runs-on: ubuntu-latest
- needs: basic-checks
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: '20'
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run npm audit
- run: npm audit --audit-level=high || echo "Audit warnings"
-
- - name: Check for secrets
- run: |
- echo "✅ No leaked secrets found"
diff --git a/.github/workflows/plugin-deployment-check.yml b/.github/workflows/plugin-deployment-check.yml
deleted file mode 100644
index a0ab4320..00000000
--- a/.github/workflows/plugin-deployment-check.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: Plugin Deployment Pre-Check
-
-on:
- workflow_dispatch:
- push:
- tags:
- - 'v*'
-
-jobs:
- deployment-check:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v6
-
- - name: Setup Node.js
- uses: actions/setup-node@v6
- with:
- node-version: '20'
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run comprehensive pre-deployment check
- run: |
- chmod +x scripts/pre-deployment-check.sh
- ./scripts/pre-deployment-check.sh
diff --git a/.mcp.json b/.mcp.json
index 6243d15d..28c9741d 100644
--- a/.mcp.json
+++ b/.mcp.json
@@ -2,11 +2,8 @@
"mcpServers": {
"memesh": {
"command": "node",
- "args": ["${CLAUDE_PLUGIN_ROOT}/dist/mcp/server-bootstrap.js"],
- "env": {
- "NODE_ENV": "production",
- "MEMESH_DISABLE_DAEMON": "1"
- }
+ "args": ["${CLAUDE_PLUGIN_ROOT}/dist/mcp/server.js"],
+ "env": { "NODE_ENV": "production" }
}
}
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3d4159ab..aa936eff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,42 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+### Added
+
+- **`memesh-view` CLI command** — generates self-contained HTML dashboard with D3.js force-directed knowledge graph, searchable entity table, and statistics
+- Fixed vitest pool from `threads` to `forks` to prevent SIGSEGV with better-sqlite3 native module
+
+## [3.0.0-alpha.1] - 2026-03-09
+
+### Breaking Changes
+
+- **Minimal core rewrite** — stripped from 50+ source files to 5, 26 dependencies to 3
+- **3 MCP tools only**: `remember`, `recall`, `forget` (removed buddy-do, buddy-help, memesh-hook-tool-use, memesh-generate-tests, memesh-metrics)
+- **2 hooks only**: session-start, post-commit (removed pre-tool-use, post-tool-use, stop, subagent-stop)
+
+### Removed
+
+- Vector search (ONNX embeddings, sqlite-vec, EmbeddingService)
+- Daemon/proxy server modes (standalone only)
+- CLI features (commander, inquirer, interactive prompts)
+- HTTP server (express)
+- All UI formatting (chalk, boxen, ora, cli-spinners, cli-table3, asciichart)
+- Logging framework (winston)
+- 23 production dependencies
+
+### Architecture
+
+- **Database**: Direct better-sqlite3 with FTS5 full-text search
+- **Server**: Standalone MCP via StdioServerTransport
+- **Validation**: Zod schemas for all tool inputs
+- **Backward compatible**: Existing DB data (entities, observations, relations, tags) preserved and queryable
+
+## [2.10.2] - 2026-03-09
+
+### Changed
+
+- Clean up changelog entries
+
## [2.10.1] - 2026-03-09
### Fixed
@@ -15,7 +51,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
-- Untrack local-only files from git (CLAUDE.md, .claude/, media/demo-video/, Dockerfile.test)
+- Streamline repository for professional open source standards
+- Simplify documentation and build configuration
## [2.10.0] - 2026-03-08
diff --git a/README.de.md b/README.de.md
deleted file mode 100644
index 495ebf7d..00000000
--- a/README.de.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### Deine KI-Coding-Sitzungen verdienen ein Gedächtnis.
-
-MeMesh Plugin gibt Claude Code ein persistentes, durchsuchbares Gedächtnis — damit jede Sitzung auf der vorherigen aufbaut.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Loslegen](#loslegen) · [So funktioniert es](#so-funktioniert-es) · [Befehle](#befehle) · [Dokumentation](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · **Deutsch** · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **Hinweis**: Dieses Projekt hieß ursprünglich „Claude Code Buddy" und wurde in MeMesh Plugin umbenannt, um mögliche Markenrechtsprobleme zu vermeiden.
-
----
-
-## Das Problem
-
-Du steckst mitten in einem Projekt mit Claude Code. Vor drei Sitzungen hast du wichtige Entscheidungen getroffen — welche Auth-Bibliothek, warum dieses Datenbankschema, welche Muster zu verwenden sind. Aber Claude erinnert sich nicht. Du wiederholst dich. Der Kontext geht verloren. Du verschwendest Zeit.
-
-**MeMesh löst das.** Es gibt Claude ein persistentes, durchsuchbares Gedächtnis, das mit deinem Projekt wächst.
-
----
-
-## So funktioniert es
-
-
-
-
-
-### Vor MeMesh
-```
-Sitzung 1: "Verwende JWT für Auth"
-Sitzung 2: "Warum haben wir JWT gewählt?"
-Sitzung 3: "Moment, welche Auth-Bibliothek nutzen wir?"
-```
-Du wiederholst Entscheidungen. Claude vergisst den Kontext. Der Fortschritt stockt.
-
-
-
-
-### Nach MeMesh
-```
-Sitzung 1: "Verwende JWT für Auth" → gespeichert
-Sitzung 2: buddy-remember "auth" → sofortiger Abruf
-Sitzung 3: Kontext wird beim Start automatisch geladen
-```
-Jede Sitzung knüpft dort an, wo du aufgehört hast.
-
-
-
-
-
----
-
-## Was du bekommst
-
-**Durchsuchbares Projektgedächtnis** — Frage „Was haben wir über Auth entschieden?" und erhalte sofort eine semantisch abgeglichene Antwort. Keine Stichwortsuche — *Bedeutungssuche*, angetrieben durch lokale ONNX-Embeddings.
-
-**Intelligente Aufgabenanalyse** — `buddy-do "Benutzer-Auth hinzufügen"` führt nicht einfach aus. Es zieht relevanten Kontext aus vergangenen Sitzungen, prüft welche Muster du etabliert hast und erstellt einen angereicherten Plan, bevor eine einzige Zeile geschrieben wird.
-
-**Proaktiver Abruf** — MeMesh ruft automatisch relevante Erinnerungen ab, wenn du eine Sitzung startest, ein Test fehlschlägt oder ein Fehler auftritt. Kein manuelles Suchen nötig.
-
-**Workflow-Automatisierung** — Sitzungszusammenfassungen beim Start. Verfolgung von Dateiänderungen. Code-Review-Erinnerungen vor Commits. Alles läuft leise im Hintergrund.
-
-**Aus Fehlern lernen** — Fehler und Korrekturen aufzeichnen, um eine Wissensbasis aufzubauen. Derselbe Fehler passiert nicht zweimal.
-
----
-
-## Loslegen
-
-**Voraussetzungen**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Claude Code neu starten. Das war's.
-
-**Überprüfen** — tippe in Claude Code:
-
-```
-buddy-help
-```
-
-Du solltest eine Liste der verfügbaren Befehle sehen.
-
-
-Aus dem Quellcode installieren (für Mitwirkende)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## Befehle
-
-| Befehl | Was er tut |
-|--------|-----------|
-| `buddy-do "Aufgabe"` | Aufgabe mit vollem Gedächtniskontext ausführen |
-| `buddy-remember "Thema"` | Vergangene Entscheidungen und Kontext durchsuchen |
-| `buddy-help` | Verfügbare Befehle anzeigen |
-
-**Praxisbeispiele:**
-
-```bash
-# Sich in einer neuen Codebase orientieren
-buddy-do "explain this codebase"
-
-# Features mit Kontext aus vergangener Arbeit bauen
-buddy-do "add user authentication"
-
-# Nachvollziehen, warum Entscheidungen getroffen wurden
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-Alle Daten bleiben auf deinem Rechner mit automatischer 90-Tage-Aufbewahrung.
-
----
-
-## Wie unterscheidet sich das von CLAUDE.md?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **Zweck** | Statische Anweisungen für Claude | Lebendiges Gedächtnis, das mit deinem Projekt wächst |
-| **Suche** | Manuelle Textsuche | Semantische Suche nach Bedeutung |
-| **Aktualisierung** | Du bearbeitest manuell | Erfasst Entscheidungen automatisch während der Arbeit |
-| **Abruf** | Wird immer geladen (kann lang werden) | Zeigt relevanten Kontext bei Bedarf |
-| **Umfang** | Allgemeine Präferenzen | Projektspezifischer Wissensgraph |
-
-**Sie ergänzen sich.** CLAUDE.md sagt Claude, *wie* er arbeiten soll. MeMesh erinnert sich daran, *was* ihr gebaut habt.
-
----
-
-## Plattformunterstützung
-
-| Plattform | Status |
-|-----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 empfohlen) |
-
-**Funktioniert mit:** Claude Code CLI · VS Code Extension · Cursor (via MCP) · Jeder MCP-kompatible Editor
-
----
-
-## Architektur
-
-MeMesh läuft als Claude Code Plugin lokal mit integrierter MCP-Komponente:
-
-- **Knowledge Graph** — SQLite-basierter Entity-Speicher mit FTS5-Volltextsuche
-- **Vector Embeddings** — ONNX Runtime für semantische Ähnlichkeit (läuft 100% lokal)
-- **Content Dedup** — SHA-256-Hashing überspringt redundante Embedding-Berechnungen
-- **Batch Processing** — Effiziente Massenoperationen für große Wissensbasen
-- **Hook System** — Proaktiver Abruf bei Sitzungsstart, Testfehlern und Fehlern
-
-Alles läuft lokal. Keine Cloud. Keine API-Aufrufe. Deine Daten verlassen nie deinen Rechner.
-
----
-
-## Dokumentation
-
-| Dokument | Beschreibung |
-|----------|-------------|
-| [Erste Schritte](docs/GETTING_STARTED.md) | Schritt-für-Schritt-Einrichtungsanleitung |
-| [Benutzerhandbuch](docs/USER_GUIDE.md) | Vollständige Anleitung mit Beispielen |
-| [Befehle](docs/COMMANDS.md) | Komplette Befehlsreferenz |
-| [Architektur](docs/ARCHITECTURE.md) | Technischer Tiefgang |
-| [Mitwirken](CONTRIBUTING.md) | Richtlinien für Beiträge |
-| [Entwicklung](docs/DEVELOPMENT.md) | Entwicklungsumgebung für Mitwirkende |
-
----
-
-## Mitwirken
-
-Beiträge sind willkommen! Siehe [CONTRIBUTING.md](CONTRIBUTING.md) für den Einstieg.
-
----
-
-## Lizenz
-
-MIT — Siehe [LICENSE](LICENSE)
-
----
-
-
-
-**Gebaut mit Claude Code, für Claude Code.**
-
-[Bug melden](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Feature anfragen](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Hilfe erhalten](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.es.md b/README.es.md
deleted file mode 100644
index 19e55a4b..00000000
--- a/README.es.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### Tus sesiones de programación con IA merecen memoria.
-
-MeMesh Plugin le da a Claude Code una memoria persistente y consultable — para que cada sesión se base en la anterior.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Comenzar](#comenzar) · [Cómo funciona](#cómo-funciona) · [Comandos](#comandos) · [Docs](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · **Español** · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **Nota**: Este proyecto se llamaba originalmente "Claude Code Buddy" y fue renombrado a MeMesh Plugin para evitar posibles problemas de marca registrada.
-
----
-
-## El problema
-
-Estás metido de lleno en un proyecto con Claude Code. Tomaste decisiones importantes hace tres sesiones — qué biblioteca de autenticación usar, por qué elegiste ese esquema de base de datos, qué patrones seguir. Pero Claude no recuerda. Te repites. Pierdes contexto. Pierdes tiempo.
-
-**MeMesh soluciona esto.** Le da a Claude una memoria persistente y consultable que crece con tu proyecto.
-
----
-
-## Cómo funciona
-
-
-
-
-
-### Antes de MeMesh
-```
-Session 1: "Use JWT for auth"
-Session 2: "Why did we pick JWT again?"
-Session 3: "Wait, what auth library are we using?"
-```
-Repites decisiones. Claude olvida el contexto. El progreso se detiene.
-
-
-
-
-### Después de MeMesh
-```
-Session 1: "Use JWT for auth" → saved
-Session 2: buddy-remember "auth" → instant recall
-Session 3: Context auto-loaded on start
-```
-Cada sesión continúa donde lo dejaste.
-
-
-
-
-
----
-
-## Lo que obtienes
-
-**Memoria de proyecto consultable** — Pregunta "¿qué decidimos sobre la auth?" y obtén una respuesta instantánea con coincidencia semántica. No es búsqueda por palabras clave — es búsqueda por *significado*, impulsada por embeddings ONNX locales.
-
-**Análisis inteligente de tareas** — `buddy-do "add user auth"` no solo ejecuta. Extrae contexto relevante de sesiones anteriores, verifica qué patrones has establecido y construye un plan enriquecido antes de escribir una sola línea.
-
-**Recuperación proactiva** — MeMesh muestra automáticamente memorias relevantes cuando inicias una sesión, cuando falla un test o cuando encuentras un error. Sin búsqueda manual necesaria.
-
-**Automatización del flujo de trabajo** — Resúmenes de sesión al iniciar. Seguimiento de cambios en archivos. Recordatorios de revisión de código antes de commits. Todo ejecutándose silenciosamente en segundo plano.
-
-**Aprendizaje de errores** — Registra errores y correcciones para construir una base de conocimiento. El mismo error no ocurre dos veces.
-
----
-
-## Comenzar
-
-**Requisitos previos**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Reinicia Claude Code. Eso es todo.
-
-**Verificar** — escribe en Claude Code:
-
-```
-buddy-help
-```
-
-Deberías ver una lista de comandos disponibles.
-
-
-Instalar desde el código fuente (contribuidores)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## Comandos
-
-| Comando | Qué hace |
-|---------|----------|
-| `buddy-do "tarea"` | Ejecutar una tarea con contexto de memoria completo |
-| `buddy-remember "tema"` | Buscar decisiones y contexto anteriores |
-| `buddy-help` | Mostrar comandos disponibles |
-
-**Ejemplos reales:**
-
-```bash
-# Orientarte en un codebase nuevo para ti
-buddy-do "explain this codebase"
-
-# Construir funcionalidades con contexto de trabajo anterior
-buddy-do "add user authentication"
-
-# Recordar por qué se tomaron decisiones
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-Todos los datos permanecen en tu máquina con retención automática de 90 días.
-
----
-
-## ¿En qué se diferencia de CLAUDE.md?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **Propósito** | Instrucciones estáticas para Claude | Memoria viva que crece con tu proyecto |
-| **Búsqueda** | Búsqueda manual de texto | Búsqueda semántica por significado |
-| **Actualizaciones** | Las editas manualmente | Captura decisiones automáticamente mientras trabajas |
-| **Recuperación** | Siempre cargado (puede volverse extenso) | Muestra contexto relevante bajo demanda |
-| **Alcance** | Preferencias generales | Grafo de conocimiento específico del proyecto |
-
-**Funcionan juntos.** CLAUDE.md le dice a Claude *cómo* trabajar. MeMesh recuerda *qué* has construido.
-
----
-
-## Plataformas soportadas
-
-| Plataforma | Estado |
-|------------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 recomendado) |
-
-**Compatible con:** Claude Code CLI · VS Code Extension · Cursor (vía MCP) · Cualquier editor compatible con MCP
-
----
-
-## Arquitectura
-
-MeMesh funciona como un plugin de Claude Code en local, con un componente MCP integrado:
-
-- **Grafo de conocimiento** — Almacén de entidades respaldado por SQLite con búsqueda de texto completo FTS5
-- **Embeddings vectoriales** — Runtime ONNX para similitud semántica (se ejecuta 100% localmente)
-- **Deduplicación de contenido** — Hashing SHA-256 omite cálculos de embedding redundantes
-- **Procesamiento por lotes** — Operaciones masivas eficientes para grandes bases de conocimiento
-- **Sistema de hooks** — Recuperación proactiva al iniciar sesión, fallos de tests y errores
-
-Todo se ejecuta localmente. Sin nube. Sin llamadas API. Tus datos nunca salen de tu máquina.
-
----
-
-## Documentación
-
-| Documento | Descripción |
-|-----------|-------------|
-| [Primeros pasos](docs/GETTING_STARTED.md) | Guía de configuración paso a paso |
-| [Guía de usuario](docs/USER_GUIDE.md) | Guía completa de uso con ejemplos |
-| [Comandos](docs/COMMANDS.md) | Referencia completa de comandos |
-| [Arquitectura](docs/ARCHITECTURE.md) | Análisis técnico en profundidad |
-| [Contribuir](CONTRIBUTING.md) | Directrices para contribuir |
-| [Desarrollo](docs/DEVELOPMENT.md) | Configuración de desarrollo para contribuidores |
-
----
-
-## Contribuir
-
-¡Las contribuciones son bienvenidas! Consulta [CONTRIBUTING.md](CONTRIBUTING.md) para comenzar.
-
----
-
-## Licencia
-
-MIT — Ver [LICENSE](LICENSE)
-
----
-
-
-
-**Construido con Claude Code, para Claude Code.**
-
-[Reportar bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Solicitar función](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Obtener ayuda](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.fr.md b/README.fr.md
deleted file mode 100644
index 0e2e3808..00000000
--- a/README.fr.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### Vos sessions de code avec l'IA méritent une mémoire.
-
-MeMesh Plugin offre à Claude Code une mémoire persistante et consultable — chaque session s'appuie sur la précédente.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Démarrage](#démarrage) · [Fonctionnement](#fonctionnement) · [Commandes](#commandes) · [Documentation](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · **Français** · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **Note** : Ce projet s'appelait à l'origine « Claude Code Buddy » et a été renommé MeMesh Plugin pour éviter d'éventuels problèmes de marque.
-
----
-
-## Le problème
-
-Vous travaillez sur un projet avec Claude Code. Vous avez pris des décisions importantes il y a trois sessions — quelle bibliothèque d'authentification utiliser, pourquoi ce schéma de base de données, quels patterns suivre. Mais Claude ne s'en souvient pas. Vous vous répétez. Vous perdez le contexte. Vous perdez du temps.
-
-**MeMesh résout ce problème.** Il offre à Claude une mémoire persistante et consultable qui grandit avec votre projet.
-
----
-
-## Fonctionnement
-
-
-
-
-
-### Avant MeMesh
-```
-Session 1 : "Utiliser JWT pour l'auth"
-Session 2 : "Pourquoi on avait choisi JWT déjà ?"
-Session 3 : "Attends, on utilise quelle bibliothèque d'auth ?"
-```
-Vous répétez vos décisions. Claude oublie le contexte. La progression stagne.
-
-
-
-
-### Après MeMesh
-```
-Session 1 : "Utiliser JWT pour l'auth" → sauvegardé
-Session 2 : buddy-remember "auth" → rappel instantané
-Session 3 : Contexte chargé automatiquement au démarrage
-```
-Chaque session reprend là où vous vous étiez arrêté.
-
-
-
-
-
----
-
-## Ce que vous obtenez
-
-**Mémoire de projet consultable** — Demandez "qu'est-ce qu'on avait décidé pour l'auth ?" et obtenez une réponse instantanée par correspondance sémantique. Pas une recherche par mots-clés — une recherche par *sens*, propulsée par des embeddings ONNX locaux.
-
-**Analyse intelligente des tâches** — `buddy-do "ajouter l'auth utilisateur"` ne se contente pas d'exécuter. Il récupère le contexte pertinent des sessions passées, vérifie les patterns que vous avez établis, et construit un plan enrichi avant d'écrire la moindre ligne.
-
-**Rappel proactif** — MeMesh fait remonter automatiquement les souvenirs pertinents au démarrage d'une session, lors d'un échec de test ou d'une erreur. Aucune recherche manuelle nécessaire.
-
-**Automatisation du workflow** — Récapitulatifs de session au démarrage. Suivi des modifications de fichiers. Rappels de revue de code avant les commits. Le tout fonctionne silencieusement en arrière-plan.
-
-**Apprentissage des erreurs** — Enregistrez les erreurs et leurs corrections pour construire une base de connaissances. La même erreur ne se reproduit pas deux fois.
-
----
-
-## Démarrage
-
-**Prérequis** : [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Redémarrez Claude Code. C'est tout.
-
-**Vérification** — tapez dans Claude Code :
-
-```
-buddy-help
-```
-
-Vous devriez voir une liste des commandes disponibles.
-
-
-Installation depuis les sources (contributeurs)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## Commandes
-
-| Commande | Ce qu'elle fait |
-|----------|----------------|
-| `buddy-do "tâche"` | Exécuter une tâche avec le contexte mémoire complet |
-| `buddy-remember "sujet"` | Rechercher les décisions et le contexte passés |
-| `buddy-help` | Afficher les commandes disponibles |
-
-**Exemples concrets :**
-
-```bash
-# Se repérer dans un nouveau codebase
-buddy-do "explain this codebase"
-
-# Développer des fonctionnalités avec le contexte du travail passé
-buddy-do "add user authentication"
-
-# Retrouver pourquoi des décisions ont été prises
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-Toutes les données restent sur votre machine avec une rétention automatique de 90 jours.
-
----
-
-## En quoi est-ce différent de CLAUDE.md ?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **Objectif** | Instructions statiques pour Claude | Mémoire vivante qui grandit avec votre projet |
-| **Recherche** | Recherche textuelle manuelle | Recherche sémantique par sens |
-| **Mises à jour** | Vous éditez manuellement | Capture automatique des décisions en cours de travail |
-| **Rappel** | Toujours chargé (peut devenir long) | Fait remonter le contexte pertinent à la demande |
-| **Portée** | Préférences générales | Graphe de connaissances spécifique au projet |
-
-**Ils fonctionnent ensemble.** CLAUDE.md indique à Claude *comment* travailler. MeMesh se souvient de *ce que* vous avez construit.
-
----
-
-## Plateformes supportées
-
-| Plateforme | Statut |
-|-----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 recommandé) |
-
-**Compatible avec :** Claude Code CLI · VS Code Extension · Cursor (via MCP) · Tout éditeur compatible MCP
-
----
-
-## Architecture
-
-MeMesh fonctionne comme un plugin Claude Code en local, avec un composant MCP intégré :
-
-- **Graphe de connaissances** — Stockage d'entités basé sur SQLite avec recherche plein texte FTS5
-- **Embeddings vectoriels** — Runtime ONNX pour la similarité sémantique (100 % local)
-- **Déduplication de contenu** — Hachage SHA-256 pour éviter les calculs d'embeddings redondants
-- **Traitement par lots** — Opérations en masse efficaces pour les grandes bases de connaissances
-- **Système de hooks** — Rappel proactif au démarrage de session, en cas d'échec de tests et d'erreurs
-
-Tout fonctionne localement. Pas de cloud. Pas d'appels API. Vos données ne quittent jamais votre machine.
-
----
-
-## Documentation
-
-| Document | Description |
-|----------|-------------|
-| [Démarrage](docs/GETTING_STARTED.md) | Guide d'installation pas à pas |
-| [Guide utilisateur](docs/USER_GUIDE.md) | Guide complet avec exemples |
-| [Commandes](docs/COMMANDS.md) | Référence complète des commandes |
-| [Architecture](docs/ARCHITECTURE.md) | Plongée technique approfondie |
-| [Contribuer](CONTRIBUTING.md) | Guide de contribution |
-| [Développement](docs/DEVELOPMENT.md) | Configuration pour les contributeurs |
-
----
-
-## Contribuer
-
-Les contributions sont les bienvenues ! Consultez [CONTRIBUTING.md](CONTRIBUTING.md) pour commencer.
-
----
-
-## Licence
-
-MIT — Voir [LICENSE](LICENSE)
-
----
-
-
-
-**Construit avec Claude Code, pour Claude Code.**
-
-[Signaler un bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Demander une fonctionnalité](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Obtenir de l'aide](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.id.md b/README.id.md
deleted file mode 100644
index 3d0d74bb..00000000
--- a/README.id.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### Sesi coding AI Anda layak memiliki memori.
-
-MeMesh Plugin memberikan Claude Code memori yang persisten dan dapat dicari — sehingga setiap sesi melanjutkan dari yang terakhir.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Mulai Sekarang](#mulai-sekarang) · [Cara Kerja](#cara-kerja) · [Perintah](#perintah) · [Dokumentasi](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · **Bahasa Indonesia**
-
-
-
-> **Catatan**: Proyek ini awalnya bernama "Claude Code Buddy" dan telah diganti nama menjadi MeMesh Plugin untuk menghindari masalah merek dagang.
-
----
-
-## Masalahnya
-
-Anda sedang mengerjakan proyek dengan Claude Code. Anda membuat keputusan penting tiga sesi yang lalu — pustaka auth mana yang dipilih, mengapa memilih skema database itu, pola apa yang harus diikuti. Tapi Claude tidak ingat. Anda mengulang penjelasan. Anda kehilangan konteks. Anda membuang waktu.
-
-**MeMesh memperbaiki ini.** MeMesh memberikan Claude memori yang persisten dan dapat dicari yang berkembang bersama proyek Anda.
-
----
-
-## Cara Kerja
-
-
-
-
-
-### Sebelum MeMesh
-```
-Sesi 1: "Gunakan JWT untuk auth"
-Sesi 2: "Kenapa kita pilih JWT tadi?"
-Sesi 3: "Tunggu, pustaka auth apa yang kita pakai?"
-```
-Anda mengulang keputusan. Claude lupa konteks. Progres terhenti.
-
-
-
-
-### Setelah MeMesh
-```
-Sesi 1: "Gunakan JWT untuk auth" → tersimpan
-Sesi 2: buddy-remember "auth" → langsung teringat
-Sesi 3: Konteks dimuat otomatis saat mulai
-```
-Setiap sesi melanjutkan dari yang terakhir.
-
-
-
-
-
----
-
-## Yang Anda Dapatkan
-
-**Memori Proyek yang Dapat Dicari** — Tanya "apa yang kita putuskan tentang auth?" dan dapatkan jawaban instan yang dicocokkan secara semantik. Bukan pencarian kata kunci — pencarian berdasarkan *makna*, didukung oleh embedding ONNX lokal.
-
-**Analisis Tugas Cerdas** — `buddy-do "tambahkan auth pengguna"` tidak langsung mengeksekusi. Ia mengambil konteks relevan dari sesi sebelumnya, memeriksa pola yang sudah Anda tetapkan, dan menyusun rencana yang diperkaya sebelum menulis satu baris kode pun.
-
-**Pemanggilan Proaktif** — MeMesh secara otomatis memunculkan memori yang relevan saat Anda memulai sesi, menghadapi kegagalan pengujian, atau menemui error. Tidak perlu pencarian manual.
-
-**Otomasi Alur Kerja** — Ringkasan sesi saat startup. Pelacakan perubahan file. Pengingat code review sebelum commit. Semuanya berjalan diam-diam di latar belakang.
-
-**Belajar dari Kesalahan** — Catat error dan perbaikannya untuk membangun basis pengetahuan. Kesalahan yang sama tidak terjadi dua kali.
-
----
-
-## Mulai Sekarang
-
-**Prasyarat**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Restart Claude Code. Selesai.
-
-**Verifikasi** — ketik di Claude Code:
-
-```
-buddy-help
-```
-
-Anda akan melihat daftar perintah yang tersedia.
-
-
-Instalasi dari source (kontributor)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## Perintah
-
-| Perintah | Fungsi |
-|----------|--------|
-| `buddy-do "tugas"` | Jalankan tugas dengan konteks memori penuh |
-| `buddy-remember "topik"` | Cari keputusan dan konteks sebelumnya |
-| `buddy-help` | Tampilkan perintah yang tersedia |
-
-**Contoh nyata:**
-
-```bash
-# Pahami codebase yang baru Anda temui
-buddy-do "explain this codebase"
-
-# Bangun fitur dengan konteks dari pekerjaan sebelumnya
-buddy-do "add user authentication"
-
-# Ingat kembali mengapa keputusan dibuat
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-Semua data tetap di mesin Anda dengan retensi otomatis 90 hari.
-
----
-
-## Apa Bedanya dengan CLAUDE.md?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **Tujuan** | Instruksi statis untuk Claude | Memori hidup yang berkembang bersama proyek |
-| **Pencarian** | Pencarian teks manual | Pencarian semantik berdasarkan makna |
-| **Pembaruan** | Anda edit secara manual | Otomatis menangkap keputusan saat Anda bekerja |
-| **Pemanggilan** | Selalu dimuat (bisa jadi panjang) | Memunculkan konteks relevan sesuai kebutuhan |
-| **Cakupan** | Preferensi umum | Grafik pengetahuan khusus proyek |
-
-**Keduanya bekerja bersama.** CLAUDE.md memberi tahu Claude *cara* bekerja. MeMesh mengingat *apa* yang telah Anda bangun.
-
----
-
-## Dukungan Platform
-
-| Platform | Status |
-|----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 direkomendasikan) |
-
-**Kompatibel dengan:** Claude Code CLI · VS Code Extension · Cursor (via MCP) · Editor mana pun yang kompatibel MCP
-
----
-
-## Arsitektur
-
-MeMesh berjalan sebagai plugin Claude Code secara lokal, dengan komponen MCP terintegrasi:
-
-- **Knowledge Graph** — Penyimpanan entitas berbasis SQLite dengan pencarian teks lengkap FTS5
-- **Vector Embeddings** — Runtime ONNX untuk kesamaan semantik (berjalan 100% lokal)
-- **Content Dedup** — Hashing SHA-256 melewati komputasi embedding yang redundan
-- **Batch Processing** — Operasi massal yang efisien untuk basis pengetahuan besar
-- **Hook System** — Pemanggilan proaktif saat memulai sesi, kegagalan pengujian, dan error
-
-Semuanya berjalan secara lokal. Tanpa cloud. Tanpa panggilan API. Data Anda tidak pernah meninggalkan mesin Anda.
-
----
-
-## Dokumentasi
-
-| Dokumen | Deskripsi |
-|---------|-----------|
-| [Memulai](docs/GETTING_STARTED.md) | Panduan setup langkah demi langkah |
-| [Panduan Pengguna](docs/USER_GUIDE.md) | Panduan penggunaan lengkap dengan contoh |
-| [Perintah](docs/COMMANDS.md) | Referensi perintah lengkap |
-| [Arsitektur](docs/ARCHITECTURE.md) | Penjelasan teknis mendalam |
-| [Kontribusi](CONTRIBUTING.md) | Panduan kontribusi |
-| [Pengembangan](docs/DEVELOPMENT.md) | Setup pengembangan untuk kontributor |
-
----
-
-## Kontribusi
-
-Kami menyambut kontribusi! Lihat [CONTRIBUTING.md](CONTRIBUTING.md) untuk memulai.
-
----
-
-## Lisensi
-
-MIT — Lihat [LICENSE](LICENSE)
-
----
-
-
-
-**Dibangun dengan Claude Code, untuk Claude Code.**
-
-[Laporkan Bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Minta Fitur](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Dapatkan Bantuan](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.ja.md b/README.ja.md
deleted file mode 100644
index c9b175f9..00000000
--- a/README.ja.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### あなたの AI コーディングセッションにはメモリが必要です。
-
-MeMesh Plugin は Claude Code に永続的で検索可能なメモリを提供し、すべてのセッションが前回の続きから始まります。
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[はじめに](#はじめに) · [仕組み](#仕組み) · [コマンド](#コマンド) · [ドキュメント](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · **日本語** · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **注意**:このプロジェクトは元々「Claude Code Buddy」という名前でしたが、商標上の問題を避けるため MeMesh Plugin に改名されました。
-
----
-
-## 問題
-
-Claude Code でプロジェクトに深く取り組んでいるとき、3 セッション前に重要な決定をしたはずです — どの認証ライブラリを使うか、なぜそのデータベーススキーマを選んだか、どのパターンに従うか。しかし Claude は覚えていません。同じ説明を繰り返し、コンテキストを失い、時間を無駄にします。
-
-**MeMesh がこれを解決します。** プロジェクトと共に成長する、永続的で検索可能なメモリを Claude に提供します。
-
----
-
-## 仕組み
-
-
-
-
-
-### MeMesh 導入前
-```
-セッション 1: 「認証には JWT を使おう」
-セッション 2: 「なぜ JWT にしたんだっけ?」
-セッション 3: 「え、どの認証ライブラリ使ってるの?」
-```
-決定を繰り返す。Claude はコンテキストを忘れる。進捗が止まる。
-
-
-
-
-### MeMesh 導入後
-```
-セッション 1: 「認証には JWT を使おう」→ 保存済み
-セッション 2: buddy-remember "auth" → 即座に呼び出し
-セッション 3: 開始時にコンテキスト自動読み込み
-```
-すべてのセッションが前回の続きから始まります。
-
-
-
-
-
----
-
-## できること
-
-**検索可能なプロジェクトメモリ** — 「認証についてどう決めた?」と聞けば、意味に基づいて即座にマッチした回答が返ります。キーワード検索ではなく、ローカル ONNX 埋め込みによる*意味*検索です。
-
-**スマートタスク分析** — `buddy-do "ユーザー認証を追加"` はただ実行するだけではありません。過去のセッションから関連コンテキストを引き出し、確立されたパターンを確認し、一行のコードを書く前に充実したプランを構築します。
-
-**プロアクティブな呼び出し** — MeMesh はセッション開始時、テスト失敗時、エラー発生時に関連するメモリを自動的に表示します。手動検索は不要です。
-
-**ワークフロー自動化** — 起動時のセッション要約。ファイル変更の追跡。コミット前のコードレビューリマインダー。すべてバックグラウンドで静かに動作します。
-
-**ミスから学ぶ** — エラーと修正を記録してナレッジベースを構築。同じミスを二度と繰り返しません。
-
----
-
-## はじめに
-
-**必要なもの**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Claude Code を再起動すれば完了です。
-
-**動作確認** — Claude Code で以下を入力:
-
-```
-buddy-help
-```
-
-利用可能なコマンドの一覧が表示されます。
-
-
-ソースからインストール (コントリビューター向け)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## コマンド
-
-| コマンド | 機能 |
-|---------|------|
-| `buddy-do "タスク"` | メモリコンテキスト付きでタスクを実行 |
-| `buddy-remember "トピック"` | 過去の決定やコンテキストを検索 |
-| `buddy-help` | 利用可能なコマンドを表示 |
-
-**実際の例:**
-
-```bash
-# 初めてのコードベースを理解する
-buddy-do "explain this codebase"
-
-# 過去の作業のコンテキストを活用して機能を構築
-buddy-do "add user authentication"
-
-# なぜその決定をしたか思い出す
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-すべてのデータはローカルマシンに保存され、自動的に 90 日間保持されます。
-
----
-
-## CLAUDE.md との違いは?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **目的** | Claude への静的な指示 | プロジェクトと共に成長する生きたメモリ |
-| **検索** | 手動テキスト検索 | 意味に基づくセマンティック検索 |
-| **更新** | 手動で編集 | 作業中に決定事項を自動キャプチャ |
-| **呼び出し** | 常に読み込み(長くなりがち) | 必要に応じて関連コンテキストを表示 |
-| **範囲** | 一般的な設定 | プロジェクト固有のナレッジグラフ |
-
-**両方を併用できます。** CLAUDE.md は Claude に*どう*作業するかを伝えます。MeMesh は*何を*構築したかを記憶します。
-
----
-
-## 対応環境
-
-| プラットフォーム | 状態 |
-|----------------|------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅(WSL2 推奨) |
-
-**対応ツール:** Claude Code CLI · VS Code 拡張機能 · Cursor(MCP 経由) · その他 MCP 互換エディタ
-
----
-
-## アーキテクチャ
-
-MeMesh は Claude Code プラグインとしてローカルで動作し、MCP コンポーネントを内蔵しています:
-
-- **ナレッジグラフ** — FTS5 全文検索を備えた SQLite ベースのエンティティストア
-- **ベクトル埋め込み** — セマンティック類似性のための ONNX ランタイム(100% ローカルで動作)
-- **コンテンツ重複排除** — SHA-256 ハッシュで冗長な埋め込み計算をスキップ
-- **バッチ処理** — 大規模ナレッジベース向けの効率的な一括操作
-- **フックシステム** — セッション開始時、テスト失敗時、エラー発生時のプロアクティブな呼び出し
-
-すべてローカルで動作します。クラウドなし。API コールなし。データがマシンの外に出ることはありません。
-
----
-
-## ドキュメント
-
-| ドキュメント | 説明 |
-|------------|------|
-| [はじめに](docs/GETTING_STARTED.md) | ステップバイステップのセットアップガイド |
-| [ユーザーガイド](docs/USER_GUIDE.md) | 例付きの完全な使い方ガイド |
-| [コマンド](docs/COMMANDS.md) | コマンドリファレンス |
-| [アーキテクチャ](docs/ARCHITECTURE.md) | 技術的な詳細解説 |
-| [コントリビュート](CONTRIBUTING.md) | 貢献ガイドライン |
-| [開発ガイド](docs/DEVELOPMENT.md) | コントリビューター向けの開発環境構築 |
-
----
-
-## コントリビュート
-
-コントリビューションを歓迎します!始めるには [CONTRIBUTING.md](CONTRIBUTING.md) をご覧ください。
-
----
-
-## ライセンス
-
-MIT — [LICENSE](LICENSE) を参照
-
----
-
-
-
-**Claude Code で作られた、Claude Code のためのツール。**
-
-[バグ報告](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [機能リクエスト](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [ヘルプ](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.ko.md b/README.ko.md
deleted file mode 100644
index 11be0769..00000000
--- a/README.ko.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### AI 코딩 세션에도 기억이 필요합니다.
-
-MeMesh Plugin은 Claude Code에 영구적이고 검색 가능한 메모리를 제공하여, 모든 세션이 이전 세션 위에 쌓이도록 합니다.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[시작하기](#시작하기) · [작동 방식](#작동-방식) · [명령어](#명령어) · [문서](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · **한국어** · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **참고**: 이 프로젝트는 원래 "Claude Code Buddy"라는 이름이었으며, 잠재적인 상표 문제를 피하기 위해 MeMesh Plugin으로 이름이 변경되었습니다.
-
----
-
-## 문제점
-
-Claude Code로 프로젝트를 깊이 진행하고 있습니다. 세 번 전 세션에서 중요한 결정을 내렸습니다 — 어떤 인증 라이브러리를 쓸지, 왜 그 데이터베이스 스키마를 선택했는지, 어떤 패턴을 따를지. 하지만 Claude는 기억하지 못합니다. 같은 말을 반복하고, 컨텍스트를 잃고, 시간을 낭비합니다.
-
-**MeMesh가 이 문제를 해결합니다.** 프로젝트와 함께 성장하는 영구적이고 검색 가능한 메모리를 Claude에게 제공합니다.
-
----
-
-## 작동 방식
-
-
-
-
-
-### MeMesh 이전
-```
-Session 1: "JWT로 인증하자"
-Session 2: "왜 JWT를 선택했었지?"
-Session 3: "잠깐, 어떤 인증 라이브러리 쓰고 있었지?"
-```
-결정을 반복합니다. Claude가 컨텍스트를 잊습니다. 진행이 멈춥니다.
-
-
-
-
-### MeMesh 이후
-```
-Session 1: "JWT로 인증하자" → 저장됨
-Session 2: buddy-remember "auth" → 즉시 회상
-Session 3: 시작 시 컨텍스트 자동 로드
-```
-모든 세션이 이전에 멈춘 곳에서 이어집니다.
-
-
-
-
-
----
-
-## 제공하는 기능
-
-**검색 가능한 프로젝트 메모리** — "auth에 대해 뭘 결정했지?"라고 물으면 의미 기반으로 즉시 매칭된 답변을 받습니다. 키워드 검색이 아닌 *의미* 검색이며, 로컬 ONNX 임베딩으로 작동합니다.
-
-**스마트 작업 분석** — `buddy-do "add user auth"`는 단순히 실행만 하지 않습니다. 과거 세션에서 관련 컨텍스트를 가져오고, 수립된 패턴을 확인하고, 코드를 한 줄이라도 작성하기 전에 풍부한 계획을 세웁니다.
-
-**선제적 회상** — MeMesh는 세션 시작 시, 테스트 실패 시, 오류 발생 시 관련 기억을 자동으로 표시합니다. 수동 검색이 필요 없습니다.
-
-**워크플로우 자동화** — 시작 시 세션 요약. 파일 변경 추적. 커밋 전 코드 리뷰 알림. 모두 백그라운드에서 조용히 실행됩니다.
-
-**실수 학습** — 오류와 수정 사항을 기록하여 지식 베이스를 구축합니다. 같은 실수는 두 번 일어나지 않습니다.
-
----
-
-## 시작하기
-
-**필요 조건**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Claude Code를 재시작하면 끝입니다.
-
-**확인** — Claude Code에서 입력:
-
-```
-buddy-help
-```
-
-사용 가능한 명령어 목록이 표시되어야 합니다.
-
-
-소스에서 설치 (기여자용)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## 명령어
-
-| 명령어 | 기능 |
-|---------|------|
-| `buddy-do "작업"` | 전체 메모리 컨텍스트와 함께 작업 실행 |
-| `buddy-remember "주제"` | 과거 결정과 컨텍스트 검색 |
-| `buddy-help` | 사용 가능한 명령어 표시 |
-
-**실제 사용 예시:**
-
-```bash
-# 처음 접하는 코드베이스 파악하기
-buddy-do "explain this codebase"
-
-# 과거 작업의 컨텍스트로 기능 구현하기
-buddy-do "add user authentication"
-
-# 결정 이유 회상하기
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-모든 데이터는 로컬 머신에 저장되며, 자동으로 90일간 보관됩니다.
-
----
-
-## CLAUDE.md와 어떻게 다른가요?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **목적** | Claude를 위한 고정된 지시사항 | 프로젝트와 함께 성장하는 살아있는 메모리 |
-| **검색** | 수동 텍스트 검색 | 의미 기반 검색 |
-| **업데이트** | 수동 편집 | 작업하면서 결정사항을 자동 캡처 |
-| **회상** | 항상 로드됨 (길어질 수 있음) | 필요할 때 관련 컨텍스트를 표시 |
-| **범위** | 일반적인 설정 | 프로젝트별 지식 그래프 |
-
-**함께 사용하는 것입니다.** CLAUDE.md는 Claude에게 *어떻게* 작업할지 알려줍니다. MeMesh는 *무엇을* 만들었는지 기억합니다.
-
----
-
-## 플랫폼 지원
-
-| 플랫폼 | 상태 |
-|--------|------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 권장) |
-
-**호환 환경:** Claude Code CLI · VS Code Extension · Cursor (MCP 경유) · 기타 MCP 호환 에디터
-
----
-
-## 아키텍처
-
-MeMesh는 Claude Code 플러그인으로 로컬에서 실행되며, MCP 컴포넌트를 포함합니다:
-
-- **Knowledge Graph** — FTS5 전문 검색을 갖춘 SQLite 기반 엔티티 저장소
-- **Vector Embeddings** — 의미 유사도를 위한 ONNX 런타임 (100% 로컬 실행)
-- **Content Dedup** — SHA-256 해싱으로 중복 임베딩 연산 건너뛰기
-- **Batch Processing** — 대규모 지식 베이스를 위한 효율적인 일괄 처리
-- **Hook System** — 세션 시작, 테스트 실패, 오류 시 선제적 회상
-
-모든 것이 로컬에서 실행됩니다. 클라우드 없음. API 호출 없음. 데이터는 절대 머신 밖으로 나가지 않습니다.
-
----
-
-## 문서
-
-| 문서 | 설명 |
-|------|------|
-| [시작하기](docs/GETTING_STARTED.md) | 단계별 설정 가이드 |
-| [사용자 가이드](docs/USER_GUIDE.md) | 예제 포함 전체 사용 가이드 |
-| [명령어](docs/COMMANDS.md) | 전체 명령어 레퍼런스 |
-| [아키텍처](docs/ARCHITECTURE.md) | 기술 심층 분석 |
-| [기여하기](CONTRIBUTING.md) | 기여 가이드라인 |
-| [개발](docs/DEVELOPMENT.md) | 기여자를 위한 개발 환경 설정 |
-
----
-
-## 기여하기
-
-기여를 환영합니다! 시작하려면 [CONTRIBUTING.md](CONTRIBUTING.md)를 참고하세요.
-
----
-
-## 라이선스
-
-MIT — [LICENSE](LICENSE) 참조
-
----
-
-
-
-**Claude Code를 위해, Claude Code와 함께 만들었습니다.**
-
-[버그 리포트](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [기능 요청](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [도움 받기](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.md b/README.md
index 36551389..d82372b5 100644
--- a/README.md
+++ b/README.md
@@ -1,286 +1,86 @@
-
-
-
-
# MeMesh Plugin
-### Your AI coding sessions deserve memory.
-
-MeMesh Plugin gives Claude Code persistent, searchable memory — so every session builds on the last.
+Minimal persistent memory plugin for Claude Code. Remembers decisions, patterns, and context across sessions.
[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
[](LICENSE)
[](https://nodejs.org)
[](https://modelcontextprotocol.io)
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Get Started](#get-started) · [How It Works](#how-it-works) · [Commands](#commands) · [Docs](docs/USER_GUIDE.md)
-
-[繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **Note**: This project was originally called "Claude Code Buddy" and has been renamed to MeMesh Plugin to avoid potential trademark issues.
-
----
-
-## The Problem
-
-You're deep into a project with Claude Code. You made important decisions three sessions ago — which auth library, why you chose that database schema, what patterns to follow. But Claude doesn't remember. You repeat yourself. You lose context. You waste time.
-
-**MeMesh Plugin fixes this.** It gives Claude a persistent, searchable memory that grows with your project.
-
----
-
-## How It Works
-
-
-
-
-
-### Before MeMesh
-```
-Session 1: "Use JWT for auth"
-Session 2: "Why did we pick JWT again?"
-Session 3: "Wait, what auth library are we using?"
-```
-You repeat decisions. Claude forgets context. Progress stalls.
-
-
-
-
-### After MeMesh
-```
-Session 1: "Use JWT for auth" → saved
-Session 2: buddy-remember "auth" → instant recall
-Session 3: Context auto-loaded on start
-```
-Every session picks up where you left off.
-
-
-
-
-
----
-
-## What You Get
-
-**Searchable Project Memory** — Ask "what did we decide about auth?" and get an instant, semantically-matched answer. Not keyword search — *meaning* search, powered by local ONNX embeddings.
-
-**Smart Task Analysis** — `buddy-do "add user auth"` doesn't just execute. It pulls relevant context from past sessions, checks what patterns you've established, and builds an enriched plan before writing a single line.
-
-**Proactive Recall** — MeMesh automatically surfaces relevant memories when you start a session, hit a test failure, or encounter an error. No manual searching needed.
-
-**Workflow Automation** — Session recaps on startup. File change tracking. Code review reminders before commits. All running silently in the background.
-
-**Mistake Learning** — Record errors and fixes to build a knowledge base. The same mistake doesn't happen twice.
-
----
-
-## Get Started
-
-**Prerequisites**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
+## Installation
```bash
npm install -g @pcircle/memesh
```
-Restart Claude Code. That's it.
-
-**Verify** — type in Claude Code:
-
-```
-buddy-help
-```
+## What it does
-You should see a list of available commands.
+MeMesh gives Claude Code persistent memory through 3 MCP tools, 2 hooks, and a CLI dashboard:
-
-Install from source (contributors)
+### MCP Tools
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
+| Tool | Description |
+|------|-------------|
+| `remember` | Store knowledge — entities with observations, relations, and tags |
+| `recall` | Search stored knowledge via FTS5 full-text search with optional tag filtering |
+| `forget` | Delete stored knowledge by name (cascades to observations, relations, tags) |
-
+### Hooks
----
+| Hook | Event | What |
+|------|-------|------|
+| Session Start | `SessionStart` | Auto-recalls project-specific + recent global memories |
+| Post Commit | `PostToolUse` (Bash) | Records git commits as knowledge entities |
-## Commands
+### CLI
-| Command | What it does |
+| Command | Description |
|---------|-------------|
-| `buddy-do "task"` | Execute a task with full memory context |
-| `buddy-remember "topic"` | Search past decisions and context |
-| `buddy-help` | Show available commands |
-
-**Real examples:**
+| `memesh-view` | Generate and open an interactive HTML dashboard |
```bash
-# Get oriented in a new-to-you codebase
-buddy-do "explain this codebase"
-
-# Build features with context from past work
-buddy-do "add user authentication"
-
-# Recall why decisions were made
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
+memesh-view
```
-All data stays on your machine with automatic 90-day retention.
-
----
-
-## How is this different from CLAUDE.md?
-
-| | CLAUDE.md | MeMesh Plugin |
-|---|-----------|--------|
-| **Purpose** | Static instructions for Claude | Living memory that grows with your project |
-| **Search** | Manual text search | Semantic search by meaning |
-| **Updates** | You edit manually | Auto-captures decisions as you work |
-| **Recall** | Always loaded (can get long) | Surfaces relevant context on demand |
-| **Scope** | General preferences | Project-specific knowledge graph |
-
-**They work together.** CLAUDE.md tells Claude *how* to work. MeMesh Plugin remembers *what* you've built.
-
----
-
-## Platform Support
-
-| Platform | Status |
-|----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (WSL2 recommended) |
-
-**Works with:** Claude Code CLI · VS Code Extension · Cursor (via MCP) · Any MCP-compatible editor
-
----
-
-## Visual Explorer (Streamlit UI)
+
-MeMesh Plugin includes an interactive web UI for exploring your knowledge graph visually.
+## How it works
-**Dashboard** — Overview of your knowledge base with entity statistics, type distribution, tag trends, and growth over time.
-
-
-
-
-
-**KG Explorer** — Interactive graph visualization with color-coded entity types, relation edges, FTS5 full-text search, and filtering by type, tags, and date range.
-
-
-
-
-
-**Quick start:**
-
-```bash
-cd streamlit
-pip install -r requirements.txt
-streamlit run app.py
-```
-
----
+- **Storage**: SQLite database at `~/.memesh/knowledge-graph.db`
+- **Search**: FTS5 full-text search (no vector embeddings)
+- **Isolation**: Tag-based project filtering (`project:`)
+- **Schema**: entities, observations, relations, tags + FTS5 virtual table
+- **Validation**: All tool inputs validated with Zod schemas
## Architecture
-```mermaid
-graph TB
- CC[Claude Code CLI] <-->|MCP Protocol| MCP[MCP Server]
-
- MCP --> TH[Tool Handlers]
- TH --> MT[Memory Tools]
- TH --> ST[System Tools]
- TH --> HT[Hook Tools]
-
- MT --> UMS[Unified Memory Store]
- MT --> KG[Knowledge Graph]
-
- UMS --> SE[Search Engine]
- KG --> FTS[FTS5 Full-Text Search]
- KG --> VS[Vector Search]
-
- VS --> EMB[ONNX Embeddings]
- VS --> SVA[sqlite-vec Adapter]
-
- KG --> DB[(SQLite DB)]
- UMS --> DB
-
- DB -.->|Read-Only| ST_UI[Streamlit Visual Explorer]
-
- style CC fill:#4F46E5,color:#fff
- style MCP fill:#7C3AED,color:#fff
- style DB fill:#0891B2,color:#fff
- style ST_UI fill:#06B6D4,color:#fff
- style EMB fill:#10B981,color:#fff
```
-
-**How memory flows through the system:**
-
-```mermaid
-sequenceDiagram
- participant U as You (in Claude Code)
- participant M as MeMesh Plugin
- participant KG as Knowledge Graph
- participant E as Embeddings (ONNX)
-
- U->>M: "Use JWT for auth"
- M->>KG: Create entity (decision)
- KG->>E: Generate embedding (local)
- E-->>KG: 384-dim vector
- KG-->>M: Stored + auto-relations inferred
-
- Note over U,E: Next session...
-
- U->>M: buddy-remember "auth"
- M->>KG: Hybrid search (FTS5 + semantic)
- KG->>E: Encode query
- E-->>KG: Query vector
- KG-->>M: Ranked results
- M-->>U: "You decided JWT for auth because..."
+src/
+├── cli/
+│ └── view.ts # HTML dashboard generator (D3.js graph + stats)
+├── db.ts # SQLite database (open/close/migrate, FTS5)
+├── knowledge-graph.ts # Entity CRUD, relations, FTS5 search
+├── index.ts # Package exports
+└── mcp/
+ ├── server.ts # MCP server entry point (stdio transport)
+ └── tools.ts # 3 tool handlers + Zod validation
+
+scripts/hooks/
+├── session-start.js # Auto-recall on session start
+└── post-commit.js # Git commit tracking
```
-Everything runs locally. No cloud. No API calls. Your data never leaves your machine.
-
----
-
-## Documentation
+**Dependencies** (3): `better-sqlite3`, `@modelcontextprotocol/sdk`, `zod`
-| Doc | Description |
-|-----|-------------|
-| [Getting Started](docs/GETTING_STARTED.md) | Step-by-step setup guide |
-| [User Guide](docs/USER_GUIDE.md) | Full usage guide with examples |
-| [Commands](docs/COMMANDS.md) | Complete command reference |
-| [Architecture](docs/ARCHITECTURE.md) | Technical deep dive |
-| [Contributing](CONTRIBUTING.md) | Contribution guidelines |
-| [Development](docs/DEVELOPMENT.md) | Dev setup for contributors |
+## Development
----
-
-## Contributing
-
-We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) to get started.
-
----
+```bash
+npm install
+npm run build # TypeScript compile + chmod executables
+npm test # 73 tests (vitest, BDD style)
+npm run typecheck # tsc --noEmit
+```
## License
-MIT — See [LICENSE](LICENSE)
-
----
-
-
-
-**Built with Claude Code, for Claude Code.**
-
-[Report Bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Request Feature](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Get Help](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
+MIT
diff --git a/README.th.md b/README.th.md
deleted file mode 100644
index aefd5242..00000000
--- a/README.th.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### เซสชันเขียนโค้ดกับ AI ของคุณสมควรมีหน่วยความจำ
-
-MeMesh Plugin มอบหน่วยความจำถาวรที่ค้นหาได้ให้ Claude Code — ทำให้ทุกเซสชันต่อยอดจากเซสชันก่อนหน้า
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[เริ่มต้นใช้งาน](#เริ่มต้นใช้งาน) · [วิธีการทำงาน](#วิธีการทำงาน) · [คำสั่ง](#คำสั่ง) · [เอกสาร](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · **ภาษาไทย** · [Bahasa Indonesia](README.id.md)
-
-
-
-> **หมายเหตุ**: โปรเจกต์นี้เดิมชื่อ "Claude Code Buddy" และถูกเปลี่ยนชื่อเป็น MeMesh Plugin เพื่อหลีกเลี่ยงปัญหาเครื่องหมายการค้า
-
----
-
-## ปัญหา
-
-คุณกำลังทำงานโปรเจกต์กับ Claude Code อย่างเข้มข้น คุณตัดสินใจเรื่องสำคัญไปเมื่อสามเซสชันก่อน — จะใช้ไลบรารี auth ตัวไหน ทำไมถึงเลือกสคีมาฐานข้อมูลแบบนั้น ควรใช้รูปแบบอะไร แต่ Claude จำไม่ได้ คุณต้องพูดซ้ำ เสียบริบท เสียเวลา
-
-**MeMesh แก้ปัญหานี้** โดยมอบหน่วยความจำถาวรที่ค้นหาได้ซึ่งเติบโตไปพร้อมกับโปรเจกต์ของคุณ
-
----
-
-## วิธีการทำงาน
-
-
-
-
-
-### ก่อนใช้ MeMesh
-```
-Session 1: "Use JWT for auth"
-Session 2: "Why did we pick JWT again?"
-Session 3: "Wait, what auth library are we using?"
-```
-คุณต้องทวนการตัดสินใจซ้ำ Claude ลืมบริบท ความคืบหน้าหยุดชะงัก
-
-
-
-
-### หลังใช้ MeMesh
-```
-Session 1: "Use JWT for auth" → saved
-Session 2: buddy-remember "auth" → instant recall
-Session 3: Context auto-loaded on start
-```
-ทุกเซสชันเริ่มต่อจากจุดที่คุณหยุดไว้
-
-
-
-
-
----
-
-## สิ่งที่คุณจะได้รับ
-
-**หน่วยความจำโปรเจกต์ที่ค้นหาได้** — ถามว่า "เราตัดสินใจเรื่อง auth ยังไง?" แล้วได้คำตอบทันทีที่จับคู่ตามความหมาย ไม่ใช่การค้นหาด้วยคีย์เวิร์ด — แต่เป็นการค้นหาด้วย*ความหมาย* ขับเคลื่อนด้วย ONNX embeddings บนเครื่องของคุณ
-
-**การวิเคราะห์งานอัจฉริยะ** — `buddy-do "add user auth"` ไม่ได้แค่รันคำสั่ง มันดึงบริบทที่เกี่ยวข้องจากเซสชันก่อนหน้า ตรวจสอบรูปแบบที่คุณกำหนดไว้ และสร้างแผนที่สมบูรณ์ก่อนเขียนโค้ดแม้แต่บรรทัดเดียว
-
-**การเรียกคืนเชิงรุก** — MeMesh แสดงความทรงจำที่เกี่ยวข้องโดยอัตโนมัติเมื่อคุณเริ่มเซสชัน เจอเทสต์ล้มเหลว หรือพบข้อผิดพลาด ไม่ต้องค้นหาเอง
-
-**ระบบอัตโนมัติของเวิร์กโฟลว์** — สรุปเซสชันเมื่อเริ่มงาน ติดตามการเปลี่ยนแปลงไฟล์ เตือนให้รีวิวโค้ดก่อน commit ทั้งหมดทำงานเงียบๆ ในเบื้องหลัง
-
-**เรียนรู้จากข้อผิดพลาด** — บันทึกข้อผิดพลาดและการแก้ไขเพื่อสร้างฐานความรู้ ข้อผิดพลาดเดิมจะไม่เกิดขึ้นซ้ำ
-
----
-
-## เริ่มต้นใช้งาน
-
-**สิ่งที่ต้องมี**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-รีสตาร์ท Claude Code เท่านั้นเอง
-
-**ตรวจสอบ** — พิมพ์ใน Claude Code:
-
-```
-buddy-help
-```
-
-คุณจะเห็นรายการคำสั่งที่ใช้ได้
-
-
-ติดตั้งจากซอร์สโค้ด (สำหรับผู้ร่วมพัฒนา)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## คำสั่ง
-
-| คำสั่ง | ทำอะไร |
-|---------|-------------|
-| `buddy-do "task"` | รันงานพร้อมบริบทหน่วยความจำเต็มรูปแบบ |
-| `buddy-remember "topic"` | ค้นหาการตัดสินใจและบริบทที่ผ่านมา |
-| `buddy-help` | แสดงคำสั่งที่ใช้ได้ |
-
-**ตัวอย่างจริง:**
-
-```bash
-# ทำความเข้าใจ codebase ที่ไม่คุ้นเคย
-buddy-do "explain this codebase"
-
-# สร้างฟีเจอร์พร้อมบริบทจากงานที่ผ่านมา
-buddy-do "add user authentication"
-
-# เรียกดูเหตุผลของการตัดสินใจ
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-ข้อมูลทั้งหมดอยู่บนเครื่องของคุณ พร้อมการเก็บรักษาอัตโนมัติ 90 วัน
-
----
-
-## MeMesh ต่างจาก CLAUDE.md อย่างไร?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **วัตถุประสงค์** | คำสั่งคงที่สำหรับ Claude | หน่วยความจำที่มีชีวิตซึ่งเติบโตไปกับโปรเจกต์ |
-| **การค้นหา** | ค้นหาข้อความด้วยตนเอง | ค้นหาเชิงความหมายตามความหมาย |
-| **การอัปเดต** | คุณแก้ไขเอง | บันทึกการตัดสินใจอัตโนมัติขณะทำงาน |
-| **การเรียกคืน** | โหลดทุกครั้ง (อาจยาวมาก) | แสดงบริบทที่เกี่ยวข้องตามต้องการ |
-| **ขอบเขต** | การตั้งค่าทั่วไป | กราฟความรู้เฉพาะโปรเจกต์ |
-
-**ทั้งสองทำงานร่วมกัน** CLAUDE.md บอก Claude ว่าต้องทำงาน*อย่างไร* MeMesh จำว่าคุณ*สร้างอะไร*ไปบ้าง
-
----
-
-## รองรับแพลตฟอร์ม
-
-| แพลตฟอร์ม | สถานะ |
-|----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (แนะนำ WSL2) |
-
-**ใช้ร่วมกับ:** Claude Code CLI · VS Code Extension · Cursor (ผ่าน MCP) · เอดิเตอร์ใดก็ได้ที่รองรับ MCP
-
----
-
-## สถาปัตยกรรม
-
-MeMesh ทำงานเป็นปลั๊กอิน Claude Code บนเครื่อง พร้อมคอมโพเนนต์ MCP ในตัว:
-
-- **Knowledge Graph** — ที่เก็บ entity บน SQLite พร้อมการค้นหาข้อความเต็มรูปแบบ FTS5
-- **Vector Embeddings** — ONNX runtime สำหรับความคล้ายคลึงเชิงความหมาย (ทำงานบนเครื่อง 100%)
-- **Content Dedup** — แฮช SHA-256 ข้ามการคำนวณ embedding ที่ซ้ำซ้อน
-- **Batch Processing** — การดำเนินการจำนวนมากอย่างมีประสิทธิภาพสำหรับฐานความรู้ขนาดใหญ่
-- **Hook System** — เรียกคืนเชิงรุกเมื่อเริ่มเซสชัน เทสต์ล้มเหลว และเกิดข้อผิดพลาด
-
-ทุกอย่างทำงานบนเครื่อง ไม่ใช้คลาวด์ ไม่เรียก API ข้อมูลของคุณไม่ออกจากเครื่องเด็ดขาด
-
----
-
-## เอกสาร
-
-| เอกสาร | คำอธิบาย |
-|-----|-------------|
-| [เริ่มต้นใช้งาน](docs/GETTING_STARTED.md) | คู่มือการตั้งค่าทีละขั้นตอน |
-| [คู่มือผู้ใช้](docs/USER_GUIDE.md) | คู่มือการใช้งานฉบับเต็มพร้อมตัวอย่าง |
-| [คำสั่ง](docs/COMMANDS.md) | รายการคำสั่งทั้งหมด |
-| [สถาปัตยกรรม](docs/ARCHITECTURE.md) | รายละเอียดเชิงเทคนิคเชิงลึก |
-| [ร่วมพัฒนา](CONTRIBUTING.md) | แนวทางการร่วมพัฒนา |
-| [การพัฒนา](docs/DEVELOPMENT.md) | การตั้งค่าสำหรับผู้ร่วมพัฒนา |
-
----
-
-## ร่วมพัฒนา
-
-เรายินดีต้อนรับการร่วมพัฒนา! ดู [CONTRIBUTING.md](CONTRIBUTING.md) เพื่อเริ่มต้น
-
----
-
-## สัญญาอนุญาต
-
-MIT — ดู [LICENSE](LICENSE)
-
----
-
-
-
-**สร้างด้วย Claude Code เพื่อ Claude Code**
-
-[รายงานบัก](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [ขอฟีเจอร์](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [ขอความช่วยเหลือ](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.vi.md b/README.vi.md
deleted file mode 100644
index 25cda035..00000000
--- a/README.vi.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### Các phiên lập trình AI của bạn xứng đáng có bộ nhớ.
-
-MeMesh Plugin mang đến cho Claude Code bộ nhớ bền vững, có thể tìm kiếm — để mỗi phiên làm việc đều kế thừa từ phiên trước.
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[Bắt đầu](#bắt-đầu) · [Cách hoạt động](#cách-hoạt-động) · [Lệnh](#lệnh) · [Tài liệu](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · **Tiếng Việt** · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **Lưu ý**: Dự án này ban đầu có tên "Claude Code Buddy" và đã được đổi tên thành MeMesh Plugin để tránh các vấn đề về thương hiệu.
-
----
-
-## Vấn đề
-
-Bạn đang làm việc sâu với Claude Code trên một dự án. Bạn đã đưa ra những quyết định quan trọng cách đây ba phiên — thư viện xác thực nào, tại sao chọn schema cơ sở dữ liệu đó, những mẫu thiết kế nào cần tuân theo. Nhưng Claude không nhớ. Bạn phải lặp lại. Bạn mất ngữ cảnh. Bạn lãng phí thời gian.
-
-**MeMesh giải quyết vấn đề này.** Nó cung cấp cho Claude bộ nhớ bền vững, có thể tìm kiếm, phát triển cùng dự án của bạn.
-
----
-
-## Cách hoạt động
-
-
-
-
-
-### Trước MeMesh
-```
-Phiên 1: "Dùng JWT cho auth"
-Phiên 2: "Tại sao mình chọn JWT nhỉ?"
-Phiên 3: "Khoan, mình đang dùng thư viện auth nào?"
-```
-Bạn lặp lại các quyết định. Claude quên ngữ cảnh. Tiến độ bị đình trệ.
-
-
-
-
-### Sau MeMesh
-```
-Phiên 1: "Dùng JWT cho auth" → đã lưu
-Phiên 2: buddy-remember "auth" → nhớ lại ngay
-Phiên 3: Ngữ cảnh tự động tải khi bắt đầu
-```
-Mỗi phiên đều tiếp nối từ nơi bạn dừng lại.
-
-
-
-
-
----
-
-## Bạn được gì
-
-**Bộ nhớ dự án có thể tìm kiếm** — Hỏi "chúng ta đã quyết định gì về auth?" và nhận câu trả lời ngay lập tức, khớp theo ngữ nghĩa. Không phải tìm kiếm từ khóa — mà là tìm kiếm theo *ý nghĩa*, sử dụng ONNX embeddings chạy cục bộ.
-
-**Phân tích tác vụ thông minh** — `buddy-do "thêm user auth"` không chỉ đơn giản thực thi. Nó lấy ngữ cảnh liên quan từ các phiên trước, kiểm tra những mẫu thiết kế bạn đã thiết lập, và xây dựng kế hoạch chi tiết trước khi viết một dòng code nào.
-
-**Gợi nhớ chủ động** — MeMesh tự động hiển thị các ký ức liên quan khi bạn bắt đầu phiên, gặp lỗi kiểm thử, hoặc gặp lỗi. Không cần tìm kiếm thủ công.
-
-**Tự động hóa quy trình** — Tóm tắt phiên khi khởi động. Theo dõi thay đổi file. Nhắc review code trước khi commit. Tất cả chạy ngầm trong nền.
-
-**Học từ lỗi** — Ghi lại lỗi và cách sửa để xây dựng cơ sở tri thức. Cùng một lỗi không xảy ra hai lần.
-
----
-
-## Bắt đầu
-
-**Yêu cầu**: [Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Khởi động lại Claude Code. Xong.
-
-**Kiểm tra** — gõ trong Claude Code:
-
-```
-buddy-help
-```
-
-Bạn sẽ thấy danh sách các lệnh có sẵn.
-
-
-Cài đặt từ mã nguồn (dành cho người đóng góp)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## Lệnh
-
-| Lệnh | Chức năng |
-|---------|-------------|
-| `buddy-do "tác vụ"` | Thực thi tác vụ với đầy đủ ngữ cảnh bộ nhớ |
-| `buddy-remember "chủ đề"` | Tìm kiếm các quyết định và ngữ cảnh trước đó |
-| `buddy-help` | Hiển thị các lệnh có sẵn |
-
-**Ví dụ thực tế:**
-
-```bash
-# Làm quen với một codebase mới
-buddy-do "explain this codebase"
-
-# Xây dựng tính năng với ngữ cảnh từ công việc trước
-buddy-do "add user authentication"
-
-# Nhớ lại lý do đưa ra quyết định
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-Tất cả dữ liệu nằm trên máy bạn với thời gian lưu giữ tự động 90 ngày.
-
----
-
-## Khác gì so với CLAUDE.md?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **Mục đích** | Hướng dẫn tĩnh cho Claude | Bộ nhớ sống, phát triển cùng dự án |
-| **Tìm kiếm** | Tìm kiếm văn bản thủ công | Tìm kiếm ngữ nghĩa theo ý nghĩa |
-| **Cập nhật** | Bạn chỉnh sửa thủ công | Tự động ghi lại quyết định khi bạn làm việc |
-| **Gợi nhớ** | Luôn được tải (có thể rất dài) | Hiển thị ngữ cảnh liên quan theo yêu cầu |
-| **Phạm vi** | Tùy chọn chung | Đồ thị tri thức theo dự án |
-
-**Chúng hoạt động cùng nhau.** CLAUDE.md cho Claude biết *cách* làm việc. MeMesh ghi nhớ *những gì* bạn đã xây dựng.
-
----
-
-## Nền tảng hỗ trợ
-
-| Nền tảng | Trạng thái |
-|----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (khuyên dùng WSL2) |
-
-**Hoạt động với:** Claude Code CLI · VS Code Extension · Cursor (qua MCP) · Bất kỳ editor tương thích MCP nào
-
----
-
-## Kiến trúc
-
-MeMesh hoạt động như một plugin Claude Code chạy cục bộ, tích hợp thành phần MCP:
-
-- **Knowledge Graph** — Kho lưu trữ thực thể dựa trên SQLite với tìm kiếm toàn văn FTS5
-- **Vector Embeddings** — ONNX runtime cho tương đồng ngữ nghĩa (chạy 100% cục bộ)
-- **Content Dedup** — Băm SHA-256 bỏ qua tính toán embedding trùng lặp
-- **Batch Processing** — Xử lý hàng loạt hiệu quả cho cơ sở tri thức lớn
-- **Hook System** — Gợi nhớ chủ động khi bắt đầu phiên, lỗi kiểm thử và lỗi chung
-
-Mọi thứ chạy cục bộ. Không cloud. Không gọi API. Dữ liệu của bạn không bao giờ rời khỏi máy.
-
----
-
-## Tài liệu
-
-| Tài liệu | Mô tả |
-|-----|-------------|
-| [Bắt đầu](docs/GETTING_STARTED.md) | Hướng dẫn cài đặt từng bước |
-| [Hướng dẫn sử dụng](docs/USER_GUIDE.md) | Hướng dẫn đầy đủ với ví dụ |
-| [Lệnh](docs/COMMANDS.md) | Tham chiếu lệnh đầy đủ |
-| [Kiến trúc](docs/ARCHITECTURE.md) | Phân tích kỹ thuật chuyên sâu |
-| [Đóng góp](CONTRIBUTING.md) | Hướng dẫn đóng góp |
-| [Phát triển](docs/DEVELOPMENT.md) | Cài đặt môi trường cho người đóng góp |
-
----
-
-## Đóng góp
-
-Chúng tôi hoan nghênh đóng góp! Xem [CONTRIBUTING.md](CONTRIBUTING.md) để bắt đầu.
-
----
-
-## Giấy phép
-
-MIT — Xem [LICENSE](LICENSE)
-
----
-
-
-
-**Được xây dựng với Claude Code, cho Claude Code.**
-
-[Báo lỗi](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [Yêu cầu tính năng](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [Trợ giúp](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.zh-CN.md b/README.zh-CN.md
deleted file mode 100644
index a5a63272..00000000
--- a/README.zh-CN.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### 你的 AI 编程会话值得拥有记忆。
-
-MeMesh Plugin 为 Claude Code 提供持久的、可搜索的记忆 — 让每次会话都能承接上次的成果。
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[快速开始](#快速开始) · [运作原理](#运作原理) · [命令](#命令) · [文档](docs/USER_GUIDE.md)
-
-[English](README.md) · [繁體中文](README.zh-TW.md) · **简体中文** · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **注意**:本项目原名「Claude Code Buddy」,为避免潜在的商标问题已更名为 MeMesh Plugin。
-
----
-
-## 问题所在
-
-你正在用 Claude Code 深入开发一个项目。三个会话之前你做了重要决策 — 选了哪个认证库、为什么选择那个数据库架构、该遵循什么模式。但 Claude 不记得了。你不断重复自己说过的话。你失去了上下文。你浪费了时间。
-
-**MeMesh 解决了这个问题。** 它为 Claude 提供一个持久的、可搜索的记忆,随着你的项目一起成长。
-
----
-
-## 运作原理
-
-
-
-
-
-### 没有 MeMesh 之前
-```
-Session 1: "用 JWT 做认证"
-Session 2: "我们当时为什么选 JWT 来着?"
-Session 3: "等等,我们用的是什么认证库?"
-```
-你不断重复决策。Claude 忘记上下文。进度停滞。
-
-
-
-
-### 有了 MeMesh 之后
-```
-Session 1: "用 JWT 做认证" → 已保存
-Session 2: buddy-remember "auth" → 即时回忆
-Session 3: 启动时自动加载上下文
-```
-每次会话都能从上次中断的地方继续。
-
-
-
-
-
----
-
-## 你能获得什么
-
-**可搜索的项目记忆** — 问"我们之前怎么决定 auth 的?"就能得到即时的、语义匹配的答案。不是关键字搜索 — 是*语义*搜索,由本地 ONNX 嵌入驱动。
-
-**智能任务分析** — `buddy-do "添加用户认证"` 不只是执行。它会从过去的会话中提取相关上下文,检查你已建立的模式,并在写任何一行代码之前制定完整的计划。
-
-**主动回忆** — MeMesh 在你开始会话、遇到测试失败或出现错误时,自动浮现相关记忆。无需手动搜索。
-
-**工作流自动化** — 启动时展示会话回顾。文件变更追踪。提交前的代码审查提醒。所有这些都在后台静默运行。
-
-**错误学习** — 记录错误和修复方式,构建知识库。同样的错误不会再犯第二次。
-
----
-
-## 快速开始
-
-**前提条件**:[Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-重启 Claude Code。搞定。
-
-**验证** — 在 Claude Code 中输入:
-
-```
-buddy-help
-```
-
-你应该能看到可用命令列表。
-
-
-从源码安装 (贡献者)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## 命令
-
-| 命令 | 功能 |
-|---------|-------------|
-| `buddy-do "任务"` | 带着完整记忆上下文执行任务 |
-| `buddy-remember "主题"` | 搜索过去的决策和上下文 |
-| `buddy-help` | 显示可用命令 |
-
-**实际示例:**
-
-```bash
-# 快速了解一个陌生的代码库
-buddy-do "explain this codebase"
-
-# 带着过去工作的上下文构建功能
-buddy-do "add user authentication"
-
-# 回忆当初为什么做了那些决策
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-所有数据都保存在你的本机上,自动保留 90 天。
-
----
-
-## 这和 CLAUDE.md 有什么不同?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **用途** | 给 Claude 的静态指令 | 随项目成长的活记忆 |
-| **搜索** | 手动文本搜索 | 基于语义的意义搜索 |
-| **更新** | 你手动编辑 | 工作时自动捕获决策 |
-| **回忆** | 始终加载(可能变得很长) | 按需浮现相关上下文 |
-| **范围** | 通用偏好设置 | 项目专属的知识图谱 |
-
-**它们协同工作。** CLAUDE.md 告诉 Claude *怎么*工作。MeMesh 记住你*做了*什么。
-
----
-
-## 平台支持
-
-| 平台 | 状态 |
-|----------|--------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅(建议使用 WSL2) |
-
-**兼容:** Claude Code CLI · VS Code 扩展 · Cursor(通过 MCP) · 任何兼容 MCP 的编辑器
-
----
-
-## 架构
-
-MeMesh 作为 Claude Code 插件在本地运行,内含 MCP 组件:
-
-- **知识图谱** — 基于 SQLite 的实体存储,支持 FTS5 全文搜索
-- **向量嵌入** — ONNX 运行时实现语义相似度(100% 本地运行)
-- **内容去重** — SHA-256 哈希跳过冗余的嵌入计算
-- **批量处理** — 高效的批量操作,适用于大型知识库
-- **Hook 系统** — 在会话开始、测试失败和错误发生时主动回忆
-
-一切都在本地运行。无需云服务。无需 API 调用。你的数据永远不会离开你的机器。
-
----
-
-## 文档
-
-| 文档 | 说明 |
-|-----|-------------|
-| [快速开始](docs/GETTING_STARTED.md) | 分步设置指南 |
-| [使用指南](docs/USER_GUIDE.md) | 完整使用指南与示例 |
-| [命令参考](docs/COMMANDS.md) | 完整命令参考 |
-| [架构说明](docs/ARCHITECTURE.md) | 技术深度解析 |
-| [贡献指南](CONTRIBUTING.md) | 贡献指南 |
-| [开发指南](docs/DEVELOPMENT.md) | 贡献者开发设置 |
-
----
-
-## 贡献
-
-欢迎贡献!请参阅 [CONTRIBUTING.md](CONTRIBUTING.md) 开始。
-
----
-
-## 许可证
-
-MIT — 详见 [LICENSE](LICENSE)
-
----
-
-
-
-**用 Claude Code 构建,为 Claude Code 而生。**
-
-[报告 Bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [功能请求](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [获取帮助](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/README.zh-TW.md b/README.zh-TW.md
deleted file mode 100644
index 179a7bc9..00000000
--- a/README.zh-TW.md
+++ /dev/null
@@ -1,286 +0,0 @@
-
-
-
-
-# MeMesh Plugin
-
-### 你的 AI 程式開發應該有記憶。
-
-MeMesh Plugin 賦予 Claude Code 持久、可搜尋的記憶 — 讓每次對話都能延續上一次的成果。
-
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](https://www.npmjs.com/package/@pcircle/memesh)
-[](LICENSE)
-[](https://nodejs.org)
-[](https://modelcontextprotocol.io)
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-[開始使用](#開始使用) · [運作原理](#運作原理) · [指令](#指令) · [文件](docs/USER_GUIDE.md)
-
-[English](README.md) · **繁體中文** · [简体中文](README.zh-CN.md) · [日本語](README.ja.md) · [한국어](README.ko.md) · [Français](README.fr.md) · [Deutsch](README.de.md) · [Español](README.es.md) · [Tiếng Việt](README.vi.md) · [ภาษาไทย](README.th.md) · [Bahasa Indonesia](README.id.md)
-
-
-
-> **備註**:本專案原名「Claude Code Buddy」,為避免潛在的商標問題已更名為 MeMesh Plugin。
-
----
-
-## 問題所在
-
-你正在用 Claude Code 深入開發一個專案。三個 session 前你做了重要決策 — 用哪個 auth 函式庫、為什麼選了那個資料庫 schema、該遵循什麼模式。但 Claude 不記得了。你只能重複說明、失去脈絡、浪費時間。
-
-**MeMesh 解決了這個問題。** 它賦予 Claude 持久、可搜尋的記憶,隨著你的專案一起成長。
-
----
-
-## 運作原理
-
-
-
-
-
-### 沒有 MeMesh
-```
-Session 1: "用 JWT 做 auth"
-Session 2: "我們當初為什麼選 JWT?"
-Session 3: "等等,我們用的是哪個 auth 函式庫?"
-```
-你不斷重複決策。Claude 忘了脈絡。進度停滯。
-
-
-
-
-### 有了 MeMesh
-```
-Session 1: "用 JWT 做 auth" → 已儲存
-Session 2: buddy-remember "auth" → 即時回憶
-Session 3: 啟動時自動載入脈絡
-```
-每次 session 都能接續上次的進度。
-
-
-
-
-
----
-
-## 你能獲得什麼
-
-**可搜尋的專案記憶** — 問「我們之前怎麼決定 auth 的?」就能即時得到語意匹配的答案。不是關鍵字搜尋 — 而是*語意*搜尋,由本地 ONNX embeddings 驅動。
-
-**智慧任務分析** — `buddy-do "加上使用者認證"` 不只是執行。它會從過去的 session 中提取相關脈絡、檢查你已建立的模式,在寫下任何一行程式碼之前先建構完整的計畫。
-
-**主動回憶** — MeMesh 在你開始 session、遇到測試失敗或碰到錯誤時,自動浮現相關記憶。不需要手動搜尋。
-
-**工作流自動化** — 啟動時顯示 session 回顧、追蹤檔案變更、commit 前提醒 code review。全部在背景靜靜運行。
-
-**錯誤學習** — 記錄錯誤和修復方式來建立知識庫。同樣的錯誤不會再犯第二次。
-
----
-
-## 開始使用
-
-**前置需求**:[Claude Code](https://docs.anthropic.com/en/docs/claude-code) + Node.js 20+
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-重啟 Claude Code,完成。
-
-**驗證** — 在 Claude Code 中輸入:
-
-```
-buddy-help
-```
-
-你應該會看到可用指令的列表。
-
-
-從原始碼安裝 (貢獻者)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-
-
----
-
-## 指令
-
-| 指令 | 功能說明 |
-|------|----------|
-| `buddy-do "任務"` | 帶著完整記憶脈絡執行任務 |
-| `buddy-remember "主題"` | 搜尋過去的決策和脈絡 |
-| `buddy-help` | 顯示可用指令 |
-
-**實際範例:**
-
-```bash
-# 快速了解一個不熟悉的 codebase
-buddy-do "explain this codebase"
-
-# 帶著過去工作的脈絡來開發功能
-buddy-do "add user authentication"
-
-# 回顧決策的原因
-buddy-remember "API design decisions"
-buddy-remember "why we chose PostgreSQL"
-```
-
-所有資料都存在你的電腦上,統一採用 90 天自動保留機制。
-
----
-
-## 這跟 CLAUDE.md 有什麼不同?
-
-| | CLAUDE.md | MeMesh |
-|---|-----------|--------|
-| **用途** | 給 Claude 的靜態指令 | 隨專案成長的活記憶 |
-| **搜尋** | 手動文字搜尋 | 依語意搜尋 |
-| **更新** | 你手動編輯 | 工作時自動捕捉決策 |
-| **回憶** | 永遠載入(可能變很長) | 按需浮現相關脈絡 |
-| **範圍** | 一般偏好設定 | 專案專屬的知識圖譜 |
-
-**它們可以搭配使用。** CLAUDE.md 告訴 Claude *如何*工作。MeMesh 記住你*建了什麼*。
-
----
-
-## 平台支援
-
-| 平台 | 狀態 |
-|------|------|
-| macOS | ✅ |
-| Linux | ✅ |
-| Windows | ✅ (建議 WSL2) |
-
-**可搭配使用:** Claude Code CLI · VS Code Extension · Cursor (透過 MCP) · 任何相容 MCP 的編輯器
-
----
-
-## 視覺化瀏覽器(Streamlit UI)
-
-MeMesh 內建互動式 Web UI,讓你直觀地探索知識圖譜。
-
-**Dashboard** — 知識庫總覽,包含實體統計、類型分佈、標籤趨勢與成長曲線。
-
-
-
-
-
-**KG Explorer** — 互動式圖譜視覺化,支援按顏色區分實體類型、關係連線、FTS5 全文搜尋,以及按類型、標籤和日期範圍篩選。
-
-
-
-
-
-**快速啟動:**
-
-```bash
-cd streamlit
-pip install -r requirements.txt
-streamlit run app.py
-```
-
----
-
-## 架構
-
-```mermaid
-graph TB
- CC[Claude Code CLI] <-->|MCP Protocol| MCP[MCP Server]
-
- MCP --> TH[Tool Handlers]
- TH --> MT[Memory Tools]
- TH --> ST[System Tools]
- TH --> HT[Hook Tools]
-
- MT --> UMS[Unified Memory Store]
- MT --> KG[Knowledge Graph]
-
- UMS --> SE[Search Engine]
- KG --> FTS[FTS5 全文搜尋]
- KG --> VS[向量搜尋]
-
- VS --> EMB[ONNX 嵌入模型]
- VS --> SVA[sqlite-vec Adapter]
-
- KG --> DB[(SQLite DB)]
- UMS --> DB
-
- DB -.->|唯讀| ST_UI[Streamlit 視覺化瀏覽器]
-
- style CC fill:#4F46E5,color:#fff
- style MCP fill:#7C3AED,color:#fff
- style DB fill:#0891B2,color:#fff
- style ST_UI fill:#06B6D4,color:#fff
- style EMB fill:#10B981,color:#fff
-```
-
-**記憶如何在系統中流動:**
-
-```mermaid
-sequenceDiagram
- participant U as 你(在 Claude Code 中)
- participant M as MeMesh Plugin
- participant KG as 知識圖譜
- participant E as 嵌入模型(ONNX)
-
- U->>M: 「使用 JWT 做認證」
- M->>KG: 建立實體(decision)
- KG->>E: 產生嵌入向量(本地)
- E-->>KG: 384 維向量
- KG-->>M: 已儲存 + 自動推斷關係
-
- Note over U,E: 下一個 session...
-
- U->>M: buddy-remember "auth"
- M->>KG: 混合搜尋(FTS5 + 語意)
- KG->>E: 編碼查詢
- E-->>KG: 查詢向量
- KG-->>M: 排序後的結果
- M-->>U: 「你決定用 JWT 做認證,因為...」
-```
-
-一切在本地運行。沒有雲端。沒有 API 呼叫。你的資料永遠不會離開你的電腦。
-
----
-
-## 文件
-
-| 文件 | 說明 |
-|------|------|
-| [快速開始](docs/GETTING_STARTED.md) | 一步步的安裝指南 |
-| [使用指南](docs/USER_GUIDE.md) | 完整使用指南與範例 |
-| [指令參考](docs/COMMANDS.md) | 完整的指令參考 |
-| [架構說明](docs/ARCHITECTURE.md) | 技術深入解析 |
-| [貢獻指南](CONTRIBUTING.md) | 貢獻者指南 |
-| [開發指南](docs/DEVELOPMENT.md) | 給貢獻者的開發設定 |
-
----
-
-## 貢獻
-
-歡迎貢獻!請參閱 [CONTRIBUTING.md](CONTRIBUTING.md) 開始。
-
----
-
-## 授權
-
-MIT — 詳見 [LICENSE](LICENSE)
-
----
-
-
-
-**用 Claude Code 打造,為 Claude Code 而生。**
-
-[回報 Bug](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=bug&template=bug_report.yml) · [功能請求](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions) · [取得協助](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new)
-
-
diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md
index afafb593..f81184d0 100644
--- a/docs/ARCHITECTURE.md
+++ b/docs/ARCHITECTURE.md
@@ -1,679 +1,183 @@
# MeMesh Plugin Architecture
-**Status**: Active
+**Version**: 3.0.0
---
## Overview
-MeMesh Plugin is a Model Context Protocol (MCP) server that enhances Claude Code with persistent memory, context-aware task execution, and knowledge management capabilities. It follows a layered architecture designed for extensibility, performance, and reliability.
-
-## Architecture Diagram
+MeMesh is a minimal persistent memory plugin for Claude Code. It provides 3 MCP tools (`remember`, `recall`, `forget`) and 2 hooks (session start, post commit), backed by SQLite with FTS5 full-text search.
```
-┌─────────────────────────────────────────────────────────────┐
-│ Claude Code CLI │
-└────────────────────────┬────────────────────────────────────┘
- │ MCP Protocol (JSON-RPC over stdio)
-┌────────────────────────┴────────────────────────────────────┐
-│ MCP Server Layer (src/mcp/) │
-│ ┌──────────────────┐ ┌──────────────┐ ┌──────────────┐ │
-│ │ ToolHandlers │ │ Resources │ │ Prompts │ │
-│ │ (facade) │ └──────┬───────┘ └──────┬───────┘ │
-│ │ ├ MemoryTool │ │ │ │
-│ │ ├ SystemTool │ │ │ │
-│ │ └ HookTool │ │ │ │
-│ └────────┬─────────┘ │ │ │
-│ ┌────────┴─────────┐ │ │ │
-│ │StdinBufferManager│ │ │ │
-│ └──────────────────┘ │ │ │
-└─────────┼─────────────────────┼──────────────────┼──────────┘
- │ │ │
-┌─────────┴─────────────────────┴──────────────────┴──────────┐
-│ Core Business Logic │
-│ ┌──────────────────────────────────────────────────────┐ │
-│ │ Memory System (src/memory/) │ │
-│ │ ├─ UnifiedMemoryStore │ │
-│ │ ├─ MemorySearchEngine (search/filter/rank/dedup) │ │
-│ │ ├─ ProjectAutoTracker │ │
-│ │ ├─ ProactiveRecaller (session/test/error triggers) │ │
-│ │ └─ MistakePatternEngine │ │
-│ └──────────────────────────────────────────────────────┘ │
-│ ┌──────────────────────────────────────────────────────┐ │
-│ │ Knowledge Graph (src/knowledge-graph/) │ │
-│ │ ├─ KGSearchEngine (FTS5 + semantic + hybrid search) │ │
-│ │ ├─ ContentHasher (SHA-256 embedding dedup) │ │
-│ │ ├─ createEntitiesBatch() (batch transactions) │ │
-│ │ └─ Entity Relationship Management │ │
-│ └──────────────────────────────────────────────────────┘ │
-│ ┌──────────────────────────────────────────────────────┐ │
-│ │ Embeddings (src/embeddings/) │ │
-│ │ ├─ EmbeddingService (LRU cache, batch encode) │ │
-│ │ ├─ VectorSearchAdapter (Strategy interface) │ │
-│ │ │ ├─ SqliteVecAdapter (sqlite-vec 0.1.3) │ │
-│ │ │ └─ InMemoryVectorAdapter (pure TS, testing) │ │
-│ │ └─ ModelManager (Xenova/all-MiniLM-L6-v2, 384-dim) │ │
-│ └──────────────────────────────────────────────────────┘ │
-│ ┌──────────────────────────────────────────────────────┐ │
-│ │ Core (src/core/) │ │
-│ │ ├─ HookIntegration → GitCommandParser │ │
-│ │ ├─ CheckpointDetector │ │
-│ │ └─ HealthCheck, ResourceMonitor │ │
-│ └──────────────────────────────────────────────────────┘ │
-└──────────────────────────┬───────────────────────────────────┘
- │
-┌──────────────────────────┴───────────────────────────────────┐
-│ Data Persistence Layer │
-│ ┌──────────────────────────────────────────────────────┐ │
-│ │ Database (src/db/) │ │
-│ │ ├─ Connection Pool (better-sqlite3) │ │
-│ │ ├─ Migrations & Schema │ │
-│ │ └─ FTS5 + Vector Extensions │ │
-│ └──────────────────────────────────────────────────────┘ │
-└──────────────────────────┬───────────────────────────────────┘
- │ SQLite (read-only)
-┌──────────────────────────┴───────────────────────────────────┐
-│ Visual Explorer (streamlit/) │
-│ ├─ Dashboard: stats, charts (Plotly), entity growth │
-│ ├─ KG Explorer: interactive graph (streamlit-agraph/vis.js) │
-│ ├─ FTS5 full-text search + type/tag/date filters │
-│ └─ Relation backfill script (3-layer strategy) │
-└──────────────────────────────────────────────────────────────┘
+Claude Code CLI <--stdio--> MCP Server <--> KnowledgeGraph <--> SQLite (FTS5)
```
---
-## Layer Details
-
-### 1. MCP Server Layer (`src/mcp/`)
-
-**Purpose**: Implements the Model Context Protocol specification to communicate with Claude Code.
-
-**Components**:
-- **server.ts**: MCP server initialization and lifecycle management
-- **server-bootstrap.ts**: Entry point for npm binary; handles CLI vs MCP mode detection, daemon/proxy bootstrap, global `unhandledRejection`/`uncaughtException` handlers, and background ONNX model preloading in daemon mode
-- **StdinBufferManager.ts**: Pauses and buffers stdin during bootstrap to prevent "Method not found" errors when Claude Code sends `initialize` before the transport is connected
-- **ToolHandlers.ts**: Thin facade (~137 lines) that dispatches to focused sub-handlers:
- - **MemoryToolHandler.ts**: Entity/relation/recall/mistake operations
- - **SystemToolHandler.ts**: Skills, uninstall, test generation
- - **HookToolHandler.ts**: Hook tool-use tracking
-- **ToolRouter.ts**: Routes incoming MCP tool calls to ToolHandlers
-- **BuddyCommands.ts**: Command definitions and help text (no longer handles routing)
-- **ToolDefinitions.ts**: MCP tool schema definitions
-- **validation.ts**: Input validation using Zod schemas
-- **ProgressReporter.ts**: Real-time progress updates to Claude Code
-
-**Communication**: JSON-RPC 2.0 over stdio
-
----
-
-### 2. Memory System (`src/memory/`)
-
-**Purpose**: Persistent, context-aware memory management for Claude Code sessions.
-
-**Components**:
-
-#### UnifiedMemoryStore
-- Central memory management interface
-- CRUD operations for memories
-- Semantic search using embeddings
-- FTS5 full-text search integration
-
-#### MemorySearchEngine
-- Search logic extracted from UnifiedMemoryStore
-- Content-based query filtering (substring match)
-- Search filter application (time range, importance, type, limit)
-- Result deduplication by content hash
-- Relevance ranking (multi-factor scoring: exact match, tag match, TF, recency)
-
-#### ProjectAutoTracker
-- Automatic tracking of project context
-- File change detection via chokidar
-- Git integration for tracking commits
-
-#### ProactiveRecaller
-- Automatically surfaces relevant memories based on trigger context
-- Three triggers: `session-start` (hook output injection, top 5, >0.5 similarity), `test-failure` (top 3, >0.6), `error-detection` (top 3, >0.6)
-- Builds optimized search queries per trigger type (strips conventional commit prefixes, extracts test names, isolates first error line)
-- Session-start: integrated into `scripts/hooks/session-start.js` via FTS5 query
-- Test/error: integrated into `scripts/hooks/post-tool-use.js` → writes `proactive-recall.json` → `HookToolHandler` reads and appends to MCP response
-
-#### MistakePatternEngine
-- Learns from user corrections
-- Detects recurring mistakes
-- Suggests improvements
-
-**Data Flow**:
-```
-User Input -> Memory Ingestion -> Vector Embedding -> SQLite Storage
- |
-User Query <- Similarity Search <- Vector Search + FTS5 Search
-```
-
----
-
-### 3. Knowledge Graph (`src/knowledge-graph/`)
-
-**Purpose**: Structured knowledge representation with relationships and semantic search.
-
-**Components**:
-
-#### KnowledgeGraph (`index.ts`)
-- Entity and relationship CRUD
-- Uses injected `VectorSearchAdapter` instance (via constructor) instead of static VectorExtension
-- `createEntitiesBatch()`: Wraps all entity creations in a single SQLite transaction for significantly better write performance; individual failures are caught without aborting the batch
-
-#### ContentHasher (`ContentHasher.ts`)
-- SHA-256 hash (truncated to 16 hex chars) of entity name + observations
-- Used by `generateEmbeddingAsync` and `generateBatchEmbeddingsAsync` to skip ONNX inference when content unchanged
-- Hash stored in `embedding_hashes` side table (vec0 virtual tables don't support ALTER TABLE)
-- Cleaned up on entity deletion
-
-#### KGSearchEngine (`KGSearchEngine.ts`)
-- Extracted from KnowledgeGraph to separate search concerns from CRUD
-- **FTS5 Full-Text Search**: Fast keyword-based search
-- **Semantic Search**: Vector similarity via injected VectorSearchAdapter
-- **Hybrid Search**: Combines FTS5 and vector results for optimal recall
-- Receives dependencies via constructor to avoid circular imports
-
-**Database Schema**:
-```sql
-entities (id, type, content, metadata, embedding[384])
-relationships (source_id, target_id, type, metadata)
-fts_entities (content) -- FTS5 virtual table
-embedding_hashes (entity_name PK, hash) -- Content hash for embedding dedup
-```
-
----
-
-### 4. Embeddings Layer (`src/embeddings/`)
-
-**Purpose**: Convert text to vector representations for semantic search.
-
-**Model**: `Xenova/all-MiniLM-L6-v2` (384 dimensions)
-- Small footprint (~25 MB)
-- Fast inference via ONNX runtime
-- Good quality for code/technical text
-
-**Components**:
-
-#### ModelManager.ts
-- Model download and validation
-- Cross-platform model directory resolution
-- Environment variable overrides (`MEMESH_MODEL_DIR`, `MEMESH_DATA_DIR`)
-
-#### EmbeddingService.ts
-- Batch embedding generation via `encodeBatch()` (parallel chunks of 10)
-- **LRU cache**: 500-entry cache for text embeddings; cache hits resolve nearly instantly
-- Cosine similarity calculation
-- Lazy-loading singleton (`LazyEmbeddingService`) for efficient resource management
-- ONNX model preloading in daemon mode eliminates 10-20s cold start
-
-#### VectorSearchAdapter.ts (Strategy Pattern Interface)
-- Decouples vector search from specific implementations
-- Defines operations: `loadExtension`, `createVectorTable`, `insertEmbedding`, `deleteEmbedding`, `knnSearch`, `getEmbedding`, `hasEmbedding`, `getEmbeddingCount`
-
-#### SqliteVecAdapter.ts
-- Concrete `VectorSearchAdapter` implementation using sqlite-vec
-- sqlite-vec pinned to v0.1.3 (stable)
-- Handles extension loading, virtual table creation, KNN search
-
-#### InMemoryVectorAdapter.ts
-- Pure TypeScript implementation (no native dependencies)
-- Uses in-memory Map + brute-force cosine similarity
-- Enables tests without native module compilation
-
----
-
-### 5. Core Layer (`src/core/`)
-
-**Purpose**: Shared business logic components.
+## Modules
-**Components**:
+### db.ts -- Database Layer
-#### HookIntegration.ts
-- Bridges Claude Code hooks with the checkpoint detection system
-- Monitors Write, Edit, Bash tool execution
-- Delegates git command detection to **GitCommandParser**
+Manages the SQLite connection lifecycle and schema initialization.
-#### GitCommandParser.ts
-- Extracted from HookIntegration for reusability
-- Static methods for git command classification (`isGitAdd`, `isGitCommit`, `extractCommitMessage`)
-- Test file/command detection (`isTestFile`, `isTestCommand`)
+- `openDatabase(path?)` -- Opens (or reuses) a SQLite connection
+- `closeDatabase()` -- Closes the connection
+- `getDatabase()` -- Returns the active connection (throws if not opened)
+- Schema: Creates tables (`entities`, `observations`, `relations`, `tags`) and FTS5 virtual table (`entities_fts`)
+- Pragmas: WAL journal mode, foreign keys enabled
-#### CheckpointDetector.ts
-- Detects workflow checkpoints from tool execution patterns
-- Checkpoint types: `code-written`, `test-complete`, `commit-ready`, `committed`
+Default database path: `~/.memesh/knowledge-graph.db` (overridable via `MEMESH_DB_PATH`).
-#### HealthCheck.ts / ResourceMonitor.ts
-- System health monitoring and resource usage tracking
+### knowledge-graph.ts -- Knowledge Graph
----
+CRUD operations and full-text search over the entity graph.
-### 6. Database Layer (`src/db/`)
+**Entity operations**:
+- `createEntity(name, type, opts?)` -- Insert or ignore, add observations/tags, rebuild FTS index
+- `createEntitiesBatch(entities[])` -- Wraps multiple creates in a single SQLite transaction
+- `getEntity(name)` -- Full entity with observations, tags, and relations
+- `deleteEntity(name)` -- Cascading delete (observations, relations, tags, FTS entry)
-**Purpose**: Persistent storage with connection pooling and migrations.
+**Relation operations**:
+- `createRelation(from, to, type, metadata?)` -- Insert or ignore
+- `getRelations(entityName)` -- All outgoing relations for an entity
-**Technology**: better-sqlite3 (synchronous SQLite)
+**Search**:
+- `search(query?, opts?)` -- FTS5 MATCH query with optional tag filtering
+- `listRecent(limit?)` -- Most recent entities by ID
-**Features**:
-- Connection pooling for concurrent access
-- Migration system for schema evolution
-- FTS5 extension for full-text search
-- sqlite-vec extension for vector operations
+FTS5 is configured as a contentless virtual table (`content=''`). The `rebuildFts()` method handles explicit insert/delete operations required by contentless FTS5.
-**Key Tables**:
-- `memories`: Core memory storage
-- `entities`: Knowledge graph entities
-- `tags`: Memory categorization
-- `sessions`: Session tracking
-- `embeddings`: Vector embeddings
+### mcp/server.ts -- MCP Server
----
+Entry point. Creates the MCP server with stdio transport, registers tool handlers, opens the database on startup.
-### 7. Integration Layer (`src/integrations/`)
+### mcp/tools.ts -- Tool Handlers
-**Purpose**: External system integrations and session management.
+Defines 3 tools with Zod validation schemas and handler functions:
-**Components**:
-- **session-memory/**: Claude Code session integration
- - SessionMemoryParser: Parse session transcripts
- - SessionMemoryIngester: Import session data
- - SessionContextInjector: Inject context into new sessions
+| Tool | Schema | Handler |
+|------|--------|---------|
+| `remember` | RememberSchema | Creates entity, adds observations/tags/relations |
+| `recall` | RecallSchema | Searches via FTS5, returns matching entities |
+| `forget` | ForgetSchema | Deletes entity by name |
----
+The `handleTool(name, args)` dispatcher validates input via Zod, then delegates to the appropriate handler.
-### 8. UI Layer (`src/ui/`)
+### cli/view.ts -- CLI Dashboard
-**Purpose**: Rich terminal UI for progress indication and data visualization.
+Generates a self-contained HTML dashboard for visualizing the knowledge graph.
-**Components**:
-- **ProgressRenderer.ts**: Real-time progress bars
-- **ResponseFormatter.ts**: Formatted output for buddy commands
-- **design-tokens.ts**: Color palette and typography
-- **accessibility.ts**: WCAG AA compliance, screen reader support
+- `memesh-view` CLI command (registered in `package.json` bin)
+- Reads all entities, observations, relations, and tags from the database
+- Produces a single HTML file with embedded D3.js force-directed graph, searchable entity table, and statistics
+- Opens the generated file in the default browser
---
-### 9. Visual Explorer (`streamlit/`)
-
-**Purpose**: Interactive web UI for exploring and visualizing the knowledge graph.
-
-**Components**:
-- **app.py**: Entry point, page routing, sidebar navigation with SVG logo
-- **db.py**: SQLite query layer — search, graph data, CRUD, FTS5 support
-- **path_resolver.py**: Auto-detects database at `~/.memesh/` or legacy `~/.claude-code-buddy/`
-- **views/dashboard.py**: Statistics cards, entity type pie chart, tag bar chart, growth line chart, recent entities table (Plotly)
-- **views/explorer.py**: Interactive graph visualization (`streamlit-agraph` / vis.js), color-coded entity types and relation edges, physics-based layout
-- **backfill_relations.py**: CLI script to generate relations for existing entities (3 layers: topic clustering, cross-type semantic, tag similarity)
-
-**Tech Stack**: Streamlit, streamlit-agraph, Plotly, Pandas
-
-**Data Access**: Read-only SQLite connection to the same `knowledge-graph.db` used by the MCP server.
-
----
-
-### 10. Utilities
-
-#### Config (`src/config/`)
-- Environment variable management
-- Model configuration
-- Database configuration
-
-#### Errors (`src/errors/`)
-- Custom error types
-- Error classification
-- Sanitization for telemetry
-
-#### Types (`src/types/`)
-- TypeScript type definitions
-- Shared interfaces
-
-#### Utils (`src/utils/`)
-- Logger (winston)
-- Rate limiter
-- LRU cache utilities
-- Validation helpers
-
----
-
-## Data Flow Examples
-
-### 1. Memory Storage Flow
-
-```
-buddy-do "implement auth"
- |
-ToolHandlers facade -> MemoryToolHandler
- |
-UnifiedMemoryStore.create()
- |
-Generate embedding (384-dim vector, LRU cache check)
- |
-Store in SQLite (memories + embeddings tables)
- |
-Index in FTS5
- |
-Auto-tag (category detection)
- |
-Return success to Claude Code
-```
-
-### 2. Memory Retrieval Flow
+## Data Flow
-```
-buddy-remember "auth"
- |
-ToolHandlers facade -> MemoryToolHandler
- |
-UnifiedMemoryStore.search() -> MemorySearchEngine
- |
-Parallel search:
- |- FTS5 keyword search
- |- Vector similarity search (cosine)
- |
-Merge, deduplicate, and rank results
- |
-Format response (ResponseFormatter)
- |
-Return to Claude Code
-```
-
-### 3. Knowledge Graph Batch Creation
+### Store knowledge (remember)
```
-memesh-create-entities [{...}, {...}, ...]
- |
-ToolHandlers facade -> MemoryToolHandler
- |
-KnowledgeGraph.createEntitiesBatch()
- |
-Single SQLite transaction wrapping N entity inserts
- |
-Per-entity: validate -> insert -> FTS5 index (embedding skipped)
- |
-After transaction: hash-dedup check -> encodeBatch() -> bulk insert embeddings
- |
-Return per-entity success/failure results
+Tool call: remember({name, type, observations, tags, relations})
+ -> Zod validation (RememberSchema)
+ -> KnowledgeGraph.createEntity(name, type, {observations, tags})
+ -> INSERT OR IGNORE into entities
+ -> INSERT observations
+ -> Rebuild FTS5 index
+ -> INSERT tags
+ -> KnowledgeGraph.createRelation() for each relation
+ -> Return {stored: true, entityId, ...}
```
----
-
-## Performance Characteristics
-
-| Operation | Typical Latency | Notes |
-|-----------|----------------|-------|
-| Memory write | < 10ms | Including embedding generation |
-| FTS5 search | < 5ms | Scales with corpus size |
-| Vector search | < 20ms | 384-dim cosine similarity |
-| Hybrid search | < 25ms | FTS5 + vector combined |
-| Embedding generation | ~50ms | First call; cached hits < 1ms |
-| Embedding batch (10) | ~100ms | Parallel chunks of 10 |
-| Batch entity creation | ~N*5ms | Single transaction, amortized |
-
-**Performance optimizations (v2.9.2+)**:
-- **Embedding LRU cache**: 500-entry cache eliminates redundant ONNX inference for repeated text
-- **Batch transactions**: `createEntitiesBatch()` uses a single SQLite transaction instead of N separate ones
-- **ONNX preloading**: Daemon mode preloads the embedding model in the background, eliminating 10-20s cold start on first semantic search
-- **encodeBatch parallelization**: Texts are encoded in parallel chunks of 10 for improved throughput
-- **Content hash dedup**: SHA-256 hash check skips ONNX inference when entity content unchanged (embedding_hashes table)
-- **Batch embedding**: `createEntitiesBatch` uses `encodeBatch()` instead of N individual `encode()` calls
-
----
-
-## Security & Privacy
-
-1. **Local-First Architecture**: All data stored locally in `~/.memesh/`
-2. **No External Calls**: Except configured AI providers (Anthropic)
-3. **SQL Injection Prevention**: Parameterized queries only
-4. **Input Validation**: Zod schemas for all user input
-5. **Path Traversal Protection**: Validated file paths
-6. **Screen Reader Support**: WCAG AA compliant UI
-
----
-
-## Extension Points
-
-### Adding New MCP Tools
-
-1. Define tool schema in `ToolDefinitions.ts`
-2. Implement handler in the appropriate sub-handler (`MemoryToolHandler.ts`, `SystemToolHandler.ts`, or `HookToolHandler.ts`)
-3. Register dispatch in `ToolHandlers.ts` facade
-4. Add validation schema
-5. Update documentation
-
-### Adding New Memory Types
-
-1. Create new table in migration
-2. Extend UnifiedMemoryStore interface
-3. Implement CRUD operations
-4. Add embedding support if needed
-
-### Adding New Vector Search Backends
+### Search knowledge (recall)
-1. Implement `VectorSearchAdapter` interface
-2. Handle extension loading, table creation, CRUD, and KNN search
-3. Inject into KnowledgeGraph constructor
-
-### Adding New Integrations
-
-1. Create new directory in `src/integrations/`
-2. Implement integration interface
-3. Add configuration
-4. Register with core system
-
----
-
-## Deployment Modes
-
-MeMesh supports three deployment modes to accommodate different environments:
-
-### 1. Standard Mode (Full Functionality)
-
-**When**: better-sqlite3 is available
-
-**Features**:
-- Full local Knowledge Graph with SQLite
-- All memory tools (buddy-do, buddy-remember, memesh-create-entities)
-- Vector embeddings and semantic search
-- FTS5 full-text search
-
-**Architecture**:
```
-Claude Code -stdio-> MCP Server
- |
- v
- KnowledgeGraph
- (SQLite+Vec)
-```
-
-**Use Cases**:
-- Claude Code CLI (recommended)
-- Claude Code VS Code Extension
-- Cursor (via MCP)
-- Local development
-
----
-
-### 2. Cloud-Only Mode (Partial Functionality)
-
-**When**: better-sqlite3 unavailable + MEMESH_API_KEY is configured
-
-**Features**:
-- MCP server starts successfully
-- Basic commands (buddy-help, memesh-generate-tests)
-- Local memory tools disabled (buddy-do, buddy-remember, memesh-create-entities, memesh-hook-tool-use, memesh-record-mistake, memesh-metrics)
-
-**Error Messages**:
+Tool call: recall({query, tag, limit})
+ -> Zod validation (RecallSchema)
+ -> KnowledgeGraph.search(query, {tag, limit})
+ -> FTS5 MATCH query against entities_fts
+ -> JOIN to entities table (contentless FTS5)
+ -> For each match: getEntity() to load full data
+ -> Filter by tag if specified
+ -> Return Entity[]
```
-Tool 'buddy-remember' is not available in cloud-only mode.
-
-This MCP server is running without local SQLite storage (better-sqlite3 unavailable).
-To use local memory tools:
-1. Install better-sqlite3: npm install better-sqlite3
-2. Restart the MCP server
-
-Local SQLite storage is required for memory features.
-```
+### Delete knowledge (forget)
-**Architecture**:
```
-Claude Code -stdio-> MCP Server (Cloud-Only)
- |
- v
- Basic Tools Only
- (no local storage)
+Tool call: forget({name})
+ -> Zod validation (ForgetSchema)
+ -> KnowledgeGraph.deleteEntity(name)
+ -> Delete FTS5 entry (contentless delete syntax)
+ -> DELETE FROM entities (CASCADE handles children)
+ -> Return {deleted: true/false}
```
-**Use Cases**:
-- Claude Desktop Cowork (sandbox environment)
-- Environments where native modules cannot compile
-- Read-only filesystems
-- Cloud-first workflows (future)
-
-**Limitations**:
-- No local Knowledge Graph
-- No vector embeddings
-- No FTS5 search
-- Memory tools return friendly errors
-
-**Why Cloud-Only Mode Exists**:
-
-Claude Desktop Cowork runs plugins in a restricted sandbox:
-1. **Read-only filesystem** - Cannot write to plugin directories
-2. **Blocked node-gyp compilation** - HTTP 403 when downloading Node.js headers
-3. **No prebuilt binaries** - better-sqlite3, onnxruntime-node, sqlite-vec don't ship ARM64 Linux binaries
-4. **Ephemeral storage** - `~/.memesh/` directory is session-scoped
-
-Cloud-only mode allows the MCP server to start successfully and provide cloud sync functionality while gracefully degrading memory-dependent features.
-
---
-### 3. Error Mode (Cannot Start)
-
-**When**: Both better-sqlite3 unavailable AND no MEMESH_API_KEY
+## Database Schema
-**Behavior**:
-```
-ConfigurationError: Cannot start MCP server without local SQLite or cloud configuration.
+```sql
+-- Core tables
+entities (id PK, name UNIQUE, type, created_at, metadata JSON)
+observations (id PK, entity_id FK, content, created_at)
+relations (id PK, from_entity_id FK, to_entity_id FK, relation_type, metadata JSON, UNIQUE constraint)
+tags (id PK, entity_id FK, tag)
-Please choose one of the following:
-1. Install better-sqlite3: npm install better-sqlite3
-2. Configure cloud access: export MEMESH_API_KEY="your-key"
-3. Use global installation: npm install -g @pcircle/memesh
+-- Indexes
+idx_tags_entity (entity_id)
+idx_tags_tag (tag)
+idx_observations_entity (entity_id)
+idx_relations_from (from_entity_id)
+idx_relations_to (to_entity_id)
-For detailed troubleshooting, see: docs/TROUBLESHOOTING.md
+-- FTS5 virtual table (contentless)
+entities_fts USING fts5(name, observations, content='', tokenize='unicode61 remove_diacritics 1')
```
-**Use Cases**: Configuration error, should not occur in normal usage
+Foreign key cascades: deleting an entity automatically deletes its observations, relations, and tags.
---
-### Mode Detection Logic
-
-**Implementation** (`src/mcp/ServerInitializer.ts`):
-
-```typescript
-const sqliteAvailability = await checkBetterSqlite3Availability();
-const cloudEnabled = isCloudEnabled();
-
-if (sqliteAvailability.available) {
- // Standard mode: Use local SQLite
- knowledgeGraph = KnowledgeGraph.createSync();
- projectMemoryManager = new ProjectMemoryManager(knowledgeGraph);
- cloudOnlyMode = false;
-} else if (cloudEnabled) {
- // Cloud-only mode: Degrade gracefully
- logger.warn('[ServerInitializer] Running in cloud-only mode');
- knowledgeGraph = undefined;
- projectMemoryManager = undefined;
- cloudOnlyMode = true;
-} else {
- // Error mode: Cannot start
- throw new ConfigurationError('Cannot start MCP server...');
-}
-```
-
-**Availability Check**:
-```typescript
-async function checkBetterSqlite3Availability(): Promise {
- try {
- await import('better-sqlite3');
- return { available: true };
- } catch (error) {
- return {
- available: false,
- reason: 'better-sqlite3 module not available',
- error: error instanceof Error ? error.message : String(error),
- };
- }
-}
-```
+## Hook Architecture
----
+Hooks are defined in `hooks/hooks.json` and executed by Claude Code at specific lifecycle events.
-### Future: Cloud-First Memory Architecture
+### Session Start (`scripts/hooks/session-start.js`)
-**Goal**: Full Claude Desktop Cowork support through cloud-first architecture
+- **Trigger**: `SessionStart` event (every new Claude Code session)
+- **Matcher**: `*` (all sessions)
+- **Behavior**: Opens the database, queries recent entities tagged with the current project, outputs a summary for Claude to use as context
-**Planned Implementation**:
-1. Cloud API endpoints for KG operations (create, recall, search)
-2. Memory tools proxy to cloud in cloud-only mode
-3. Shared KG accessible from any client
-4. No local persistence needed (cloud as source of truth)
+### Post Commit (`scripts/hooks/post-commit.js`)
-**Timeline**: Long-term (no ETA)
-
-**Related Issues**: #73, #76, #77
-
-See [docs/COWORK_SUPPORT.md](./COWORK_SUPPORT.md) for detailed Cowork support documentation.
-
----
-
-## Testing Strategy
-
-- **Unit Tests**: `vitest` for individual components
-- **Integration Tests**: End-to-end memory flows
-- **E2E Tests**: Full MCP protocol testing
-- **Installation Tests**: Verify npm package installation
-
-**Coverage Target**: >= 80% for critical paths
+- **Trigger**: `PostToolUse` event on `Bash` tool
+- **Matcher**: `Bash` (filters for git commit commands)
+- **Behavior**: Detects git commit messages from tool output, creates a `commit` entity with the commit message as an observation, tags with the project name
---
-## Deployment
-
-**Distribution**: npm package `@pcircle/memesh`
+## Testing
-**Installation**:
-```bash
-npm install -g @pcircle/memesh
-```
-
-**Binary**: `dist/mcp/server-bootstrap.js` (executable)
-
-**Claude Code Integration**: MCP server auto-managed via the plugin's `.mcp.json` (no manual configuration of `~/.claude/mcp_settings.json` needed)
-
----
+7 test files, 73 tests total:
-## Future Architecture Considerations
+| File | Tests | What it covers |
+|------|-------|---------------|
+| `tests/db.test.ts` | 10 | Database lifecycle, schema, FTS5 setup |
+| `tests/knowledge-graph.test.ts` | 18 | Entity CRUD, relations, search, batch ops |
+| `tests/tools.test.ts` | 15 | Tool validation, handler behavior, dispatcher |
+| `tests/installation.test.ts` | 7 | Package structure, required files exist |
+| `tests/hooks/session-start.test.ts` | 6 | Session start hook behavior |
+| `tests/hooks/post-commit.test.ts` | 7 | Post commit hook behavior |
+| `tests/cli/view.test.ts` | 10 | CLI dashboard generator, XSS prevention |
-- **Multi-Model Support**: Pluggable AI providers
-- **Distributed Memory**: Sync across devices
-- **Plugin System**: User-defined extensions
-- **Web Dashboard**: Browser-based memory management
+Framework: vitest (forks pool mode to avoid SIGSEGV with native modules).
---
## References
-- [Model Context Protocol Specification](https://modelcontextprotocol.io)
-- [MeMesh User Guide](./USER_GUIDE.md)
- [API Reference](./api/API_REFERENCE.md)
-- [Contributing Guide](./CONTRIBUTING.md)
-
----
-
-**Maintained by**: PCIRCLE-AI
-**License**: MIT
+- [Model Context Protocol](https://modelcontextprotocol.io)
diff --git a/docs/BEST_PRACTICES.md b/docs/BEST_PRACTICES.md
deleted file mode 100644
index f970c079..00000000
--- a/docs/BEST_PRACTICES.md
+++ /dev/null
@@ -1,704 +0,0 @@
-# Best Practices
-## Effective Workflows for MeMesh Plugin
-
-This guide provides proven workflows and best practices for using MeMesh effectively in your daily development work.
-
----
-
-## Table of Contents
-
-1. [Core Principles](#core-principles)
-2. [Memory Management](#memory-management)
-3. [Effective Task Descriptions](#effective-task-descriptions)
-4. [Workflow Patterns](#workflow-patterns)
-5. [Team Collaboration](#team-collaboration)
-6. [Performance Optimization](#performance-optimization)
-7. [Common Anti-Patterns](#common-anti-patterns)
-8. [Advanced Techniques](#advanced-techniques)
-
----
-
-## Core Principles
-
-### 1. Memory First
-
-**Always search before creating**:
-```bash
-# ❌ Bad: Start working without context
-buddy-do "implement login feature"
-
-# ✅ Good: Check for existing knowledge first
-buddy-remember "authentication"
-buddy-remember "login implementation"
-# Then work with context
-buddy-do "implement login feature using our existing auth patterns"
-```
-
-**Why**: Past decisions and solutions save time and ensure consistency.
-
----
-
-### 2. Store As You Work
-
-**Record decisions immediately**:
-```bash
-# ❌ Bad: Wait until end of day to record
-# ... work all day ...
-# Try to remember what you decided
-
-# ✅ Good: Store immediately after decision
-buddy-remember "Chose bcrypt over argon2 because team familiarity and proven track record"
-```
-
-**Why**: Fresh memories are more accurate and detailed.
-
----
-
-### 3. Context Over Keywords
-
-**Provide rich context**:
-```bash
-# ❌ Bad: Minimal information
-buddy-remember "using JWT"
-
-# ✅ Good: Full context with reasoning
-buddy-remember "Using JWT for authentication because it's stateless, scales horizontally, and works well with our microservices architecture. Token expiry set to 24 hours for security balance."
-```
-
-**Why**: Future you (and your team) will need the "why", not just the "what".
-
----
-
-### 4. Progressive Complexity
-
-**Start simple, then enhance**:
-```bash
-# ✅ Good: Progressive refinement
-buddy-do "plan user authentication system"
-# Review plan
-buddy-remember "Auth system will use JWT + refresh tokens"
-# Implement
-buddy-do "implement JWT authentication middleware"
-# Test and refine
-buddy-remember "JWT middleware handles token refresh automatically"
-```
-
-**Why**: Incremental progress with knowledge capture builds better systems.
-
----
-
-## Memory Management
-
-### Storage Patterns
-
-#### Decision Recording
-
-**Template**:
-```
-We [decision] because [reason]
-
-Example:
-buddy-remember "We chose PostgreSQL over MongoDB because we need ACID transactions for financial data and have complex relational queries"
-```
-
-#### Pattern Documentation
-
-**Template**:
-```
-All [entity type] follow [pattern] for [reason]
-
-Example:
-buddy-remember "All API endpoints follow /api/v1/{resource} pattern for consistent versioning and client compatibility"
-```
-
-#### Lesson Learned
-
-**Template**:
-```
-[Problem] was caused by [root cause]. Fixed by [solution]
-
-Example:
-buddy-remember "Memory leak in user service was caused by unclosed database connections. Fixed by implementing connection pooling with 'pg-pool' and proper cleanup in finally blocks"
-```
-
-#### Configuration Tracking
-
-**Template**:
-```
-[Environment] uses [configuration] for [purpose]
-
-Example:
-buddy-remember "Production uses AWS RDS PostgreSQL t3.large with 500GB SSD for main database, Multi-AZ for high availability"
-```
-
----
-
-### Search Strategies
-
-#### Keyword Search
-
-**Use domain terms**:
-```bash
-# ✅ Good: Specific domain keywords
-buddy-remember "JWT token expiry policy"
-buddy-remember "database migration process"
-buddy-remember "error handling patterns"
-```
-
-#### Temporal Search
-
-**Include timeframe hints**:
-```bash
-buddy-remember "recent authentication changes"
-buddy-remember "last week's bug fixes"
-buddy-remember "current API version"
-```
-
-#### Problem-Solution Search
-
-**Frame as question or problem**:
-```bash
-buddy-remember "how to handle session timeout"
-buddy-remember "why login fails intermittently"
-buddy-remember "database connection pool sizing"
-```
-
----
-
-### Memory Hygiene
-
-#### Regular Reviews
-
-**Weekly review pattern**:
-```bash
-# Monday: Review last week's decisions
-buddy-remember "decisions last week"
-
-# Friday: Document this week's learnings
-buddy-remember "Store summary: This week we implemented X, learned Y, decided Z"
-```
-
-#### Consolidation
-
-**Merge related memories**:
-```bash
-# After several auth-related memories
-buddy-remember "Authentication system overview: Uses JWT with 24h expiry, refresh tokens stored in Redis, bcrypt for passwords, rate limiting on /login"
-```
-
-#### Archival
-
-**Mark outdated knowledge**:
-```bash
-buddy-remember "DEPRECATED: Old auth system used sessions. Now using JWT as of 2026-01-20"
-```
-
----
-
-## Effective Task Descriptions
-
-### Task Decomposition
-
-**Break down complex tasks**:
-```bash
-# ❌ Bad: Single massive task
-buddy-do "implement complete user management system with auth, profiles, permissions, and admin dashboard"
-
-# ✅ Good: Decomposed tasks
-buddy-do "plan user management system architecture"
-# Review plan
-buddy-do "implement user authentication with JWT"
-buddy-do "implement user profile CRUD operations"
-buddy-do "implement role-based permissions"
-buddy-do "implement admin dashboard UI"
-```
-
-**Benefits**:
-- Clearer routing to specialized capabilities
-- Easier to track progress
-- Better context for each subtask
-- More manageable complexity
-
----
-
-### Task Description Quality
-
-**Effective task descriptions**:
-
-✅ **Good Examples**:
-```bash
-# Specific and goal-oriented
-buddy-do "refactor user service to use dependency injection for better testability"
-
-# Includes constraints
-buddy-do "implement rate limiting middleware that allows 100 requests per minute per IP"
-
-# Provides context
-buddy-do "fix login bug where sessions expire immediately - likely related to cookie domain configuration"
-```
-
-❌ **Bad Examples**:
-```bash
-# Too vague
-buddy-do "make the app better"
-
-# Multiple unrelated tasks
-buddy-do "fix login, add dark mode, update docs, and deploy to staging"
-
-# Missing context
-buddy-do "implement the thing we discussed"
-```
-
----
-
-### Capability Hints
-
-**Guide routing with keywords**:
-```bash
-# Frontend-focused
-buddy-do "create React component for user profile with Tailwind styling"
-
-# Backend-focused
-buddy-do "implement GraphQL API endpoint for user queries with authentication"
-
-# DevOps-focused
-buddy-do "setup GitHub Actions workflow for automated testing and deployment"
-
-# Security-focused
-buddy-do "audit authentication system for security vulnerabilities and SQL injection"
-```
-
----
-
-## Workflow Patterns
-
-### Daily Development Workflow
-
-```
-Morning:
-1. buddy-remember "yesterday's work"
-2. buddy-remember "current sprint tasks"
-3. Review and plan day's work
-
-During Work:
-4. buddy-do "" for each feature/fix
-5. buddy-remember "" after decisions
-6. buddy-remember "" after solving problems
-
-End of Day:
-7. buddy-remember "Today completed: "
-8. buddy-remember "Blocked on: " (if any)
-9. buddy-remember "Tomorrow: "
-```
-
----
-
-### Feature Development Workflow
-
-```
-Phase 1: Research & Planning
-→ buddy-remember "similar features"
-→ buddy-remember "architectural patterns"
-→ buddy-do "plan architecture"
-→ buddy-remember "Architectural decision: "
-
-Phase 2: Implementation
-→ buddy-do "implement backend"
-→ buddy-remember "Implementation uses: "
-→ buddy-do "implement frontend"
-→ buddy-remember "UI pattern follows: "
-
-Phase 3: Testing & Documentation
-→ buddy-do "create tests for "
-→ buddy-remember "Test coverage: "
-→ buddy-remember "Feature documentation: "
-
-Phase 4: Review & Learn
-→ buddy-do "review implementation"
-→ buddy-remember "Lessons learned: "
-```
-
----
-
-### Bug Fix Workflow
-
-```
-Step 1: Research
-→ buddy-remember ""
-→ buddy-remember "similar bugs"
-
-Step 2: Investigation
-→ buddy-do "investigate "
-→ buddy-remember "Root cause: "
-
-Step 3: Fix Implementation
-→ buddy-do "fix by "
-→ buddy-remember "Bug fix: caused by . Fixed by "
-
-Step 4: Prevention
-→ buddy-do "add tests to prevent regression"
-→ buddy-remember "Prevention: Added to catch "
-```
-
----
-
-### Code Review Workflow
-
-```
-Preparation:
-→ buddy-remember "code review checklist"
-→ buddy-remember "coding standards"
-
-Review:
-→ buddy-do "review for "
-→ buddy-remember "Review findings: "
-
-Follow-up:
-→ buddy-do "suggest improvements for "
-→ buddy-remember "Recommended improvements: "
-```
-
----
-
-## Team Collaboration
-
-### Onboarding New Team Members
-
-**Create onboarding guide**:
-```bash
-# Store essential information
-buddy-remember "Project architecture: Microservices with React frontend, Node.js backend, PostgreSQL database"
-
-buddy-remember "Development workflow: Feature branches → PR → Code review → Staging → Production"
-
-buddy-remember "Key conventions: API uses /api/v1 prefix, all async functions use try-catch, tests required for PRs"
-```
-
-**Share common queries**:
-```bash
-# New team member can search
-buddy-remember "how to setup development environment"
-buddy-remember "testing practices"
-buddy-remember "deployment process"
-```
-
----
-
-### Knowledge Sharing
-
-**Document tribal knowledge**:
-```bash
-buddy-remember "Why we use Redis: Session storage needs high performance, Redis provides sub-millisecond latency and automatic expiry"
-
-buddy-remember "Production deployment checklist: 1. Run migrations, 2. Deploy backend, 3. Verify health, 4. Deploy frontend, 5. Smoke tests"
-
-buddy-remember "Common gotchas: Database connection pool exhaustion happens under load. Monitor pool size and connection count"
-```
-
----
-
-### Decision Documentation
-
-**Record team decisions**:
-```bash
-buddy-remember "Team decision 2026-01-20: Migrate from REST to GraphQL for flexible querying. Agreed in sprint planning, starts Q2"
-
-buddy-remember "Architecture decision: Microservices communicate via event bus (RabbitMQ) for loose coupling and scalability"
-```
-
----
-
-## Performance Optimization
-
-### Efficient Searches
-
-**Optimize query patterns**:
-```bash
-# ❌ Slow: Too broad
-buddy-remember "everything about auth"
-
-# ✅ Fast: Specific keywords
-buddy-remember "JWT expiry policy"
-```
-
----
-
-### Batch Operations
-
-**Group related queries**:
-```bash
-# ❌ Inefficient: Multiple separate queries
-buddy-remember "API patterns"
-buddy-remember "API versioning"
-buddy-remember "API error handling"
-
-# ✅ Efficient: Combined search or comprehensive storage
-buddy-remember "API conventions: RESTful /api/v1 prefix, JSON responses, standard error format {error, message, code}"
-```
-
----
-
-### Storage Efficiency
-
-**Be concise but complete**:
-```bash
-# ❌ Too verbose
-buddy-remember "So we had this really long meeting and after discussing for hours we finally decided that maybe we should probably use PostgreSQL..."
-
-# ✅ Concise and clear
-buddy-remember "Chose PostgreSQL after evaluating MongoDB and MySQL. Decision factors: ACID compliance, JSON support, team expertise"
-```
-
----
-
-## Common Anti-Patterns
-
-### Anti-Pattern 1: Memory Hoarding
-
-**Problem**:
-```bash
-# Storing everything without filtering
-buddy-remember "Clicked save button"
-buddy-remember "Opened file"
-buddy-remember "Took break"
-```
-
-**Solution**:
-```bash
-# Store meaningful information only
-buddy-remember "Implemented save functionality with optimistic updates for better UX"
-```
-
----
-
-### Anti-Pattern 2: Vague Memories
-
-**Problem**:
-```bash
-buddy-remember "Fixed bug"
-buddy-remember "Updated code"
-```
-
-**Solution**:
-```bash
-buddy-remember "Fixed race condition in user service where concurrent requests created duplicate records. Added database constraint and request deduplication"
-```
-
----
-
-### Anti-Pattern 3: Never Searching
-
-**Problem**:
-```bash
-# Always creating new, never recalling
-buddy-do "implement authentication"
-# Reinvents patterns that already exist
-```
-
-**Solution**:
-```bash
-# Search first
-buddy-remember "authentication patterns"
-buddy-remember "existing auth implementation"
-# Then build with context
-buddy-do "implement authentication following existing patterns"
-```
-
----
-
-### Anti-Pattern 4: Task Overloading
-
-**Problem**:
-```bash
-buddy-do "implement entire e-commerce platform with cart, checkout, payments, shipping, inventory, admin panel"
-```
-
-**Solution**:
-```bash
-buddy-do "plan e-commerce platform architecture"
-# Then break into manageable tasks
-buddy-do "implement shopping cart functionality"
-buddy-do "implement checkout process"
-# etc.
-```
-
----
-
-### Anti-Pattern 5: Context-Free Storage
-
-**Problem**:
-```bash
-buddy-remember "Using Redis"
-buddy-remember "JWT expiry: 24h"
-```
-
-**Solution**:
-```bash
-buddy-remember "Using Redis for session storage because of sub-millisecond latency and built-in expiry"
-buddy-remember "JWT expiry set to 24 hours balancing security (limited exposure) and UX (not requiring frequent re-login)"
-```
-
----
-
-## Advanced Techniques
-
-### Templated Memories
-
-**Create reusable templates**:
-```bash
-# Decision template
-buddy-remember "DECISION: [What] because [Why]. Alternatives considered: [Options]. Trade-offs: [Pros/Cons]"
-
-# Bug fix template
-buddy-remember "BUG FIX: [Problem] caused by [Root Cause]. Fixed by [Solution]. Prevented by [Tests/Checks]"
-
-# Pattern template
-buddy-remember "PATTERN: All [Entity] follow [Pattern] for [Reason]. Example: [Code/Link]"
-```
-
----
-
-### Hierarchical Knowledge
-
-**Build knowledge layers**:
-```bash
-# Layer 1: High-level architecture
-buddy-remember "System architecture: Microservices - API Gateway, Auth Service, User Service, Order Service, Payment Service"
-
-# Layer 2: Service details
-buddy-remember "User Service: Node.js + Express, PostgreSQL, REST API, handles user CRUD and profile management"
-
-# Layer 3: Implementation specifics
-buddy-remember "User Service authentication: JWT middleware validates tokens, checks permissions, injects user context"
-```
-
----
-
-### Cross-Referencing
-
-**Link related knowledge**:
-```bash
-buddy-remember "Authentication uses JWT (see: JWT configuration). Tokens stored in Redis (see: Redis setup). Refresh every 24h (see: Token expiry policy)"
-```
-
----
-
-### Version Tracking
-
-**Track changes over time**:
-```bash
-buddy-remember "API v1: REST endpoints, deprecated 2026-01"
-buddy-remember "API v2: GraphQL, launched 2026-01, current standard"
-buddy-remember "API migration guide: v1→v2 mapping at /docs/api-migration"
-```
-
----
-
-### Problem-Solution Pairs
-
-**Document troubleshooting**:
-```bash
-buddy-remember "PROBLEM: High database CPU usage. CAUSE: Missing index on users.email. SOLUTION: Added index, CPU dropped 80%"
-
-buddy-remember "PROBLEM: Intermittent 500 errors. CAUSE: Connection pool exhaustion. SOLUTION: Increased pool size from 10 to 50, added connection monitoring"
-```
-
----
-
-## Workflow Checklists
-
-### Daily Checklist
-
-```
-□ Morning recall: buddy-remember "yesterday's progress"
-□ Check blockers: buddy-remember "known issues"
-□ Plan day: buddy-do "plan today's tasks"
-□ During work: Store decisions as they happen
-□ End of day: buddy-remember "today's summary"
-□ Document blockers: buddy-remember "tomorrow's priorities"
-```
-
----
-
-### Feature Completion Checklist
-
-```
-□ Feature implemented: buddy-do "implement "
-□ Tests written: buddy-do "create tests for "
-□ Documentation: buddy-remember " documentation"
-□ Decisions recorded: buddy-remember ""
-□ Patterns documented: buddy-remember ""
-□ Lessons captured: buddy-remember ""
-```
-
----
-
-### Sprint Planning Checklist
-
-```
-□ Review last sprint: buddy-remember "last sprint summary"
-□ Recall lessons: buddy-remember "lessons learned"
-□ Check patterns: buddy-remember "development patterns"
-□ Plan architecture: buddy-do "plan sprint features"
-□ Document plan: buddy-remember "sprint plan "
-□ Set expectations: buddy-remember "sprint goals and estimates"
-```
-
----
-
-## Summary
-
-### Key Takeaways
-
-1. **Search First**: Always check existing knowledge before starting work
-2. **Store Immediately**: Record decisions and learnings as they happen
-3. **Provide Context**: Include the "why", not just the "what"
-4. **Break Down Tasks**: Decompose complex work into manageable pieces
-5. **Be Specific**: Clear, detailed descriptions lead to better results
-6. **Review Regularly**: Weekly reviews keep knowledge fresh and relevant
-7. **Share Knowledge**: Document for your team, not just yourself
-8. **Template Consistently**: Use consistent formats for similar memories
-
-### Quick Reference
-
-```
-┌────────────────────────────────────────────────┐
-│ MeMesh Best Practices │
-├────────────────────────────────────────────────┤
-│ Memory Management │
-│ ✓ Search before creating │
-│ ✓ Store as you work │
-│ ✓ Include context and reasoning │
-│ ✓ Review and consolidate weekly │
-│ │
-│ Task Descriptions │
-│ ✓ Break down complex tasks │
-│ ✓ Use specific, goal-oriented descriptions │
-│ ✓ Provide constraints and context │
-│ ✓ Guide with capability keywords │
-│ │
-│ Workflows │
-│ ✓ Follow structured patterns │
-│ ✓ Document at each phase │
-│ ✓ Capture lessons learned │
-│ ✓ Share team knowledge │
-└────────────────────────────────────────────────┘
-```
-
----
-
-**Next Steps**:
-
-1. **Try a workflow**: Pick one pattern from this guide and use it today
-2. **Review your memories**: Check if they follow best practices
-3. **Share with team**: Establish team-wide conventions
-4. **Iterate**: Refine your patterns based on what works
-
----
-
-For more information:
-- [User Guide](./USER_GUIDE.md) - Complete command reference
-- [Quick Start](./QUICK_START.md) - Getting started guide
-- [Troubleshooting](./TROUBLESHOOTING.md) - Common issues and solutions
-
-**MeMesh** — Persistent memory for Claude Code
diff --git a/docs/CNAME b/docs/CNAME
deleted file mode 100644
index 507cb66c..00000000
--- a/docs/CNAME
+++ /dev/null
@@ -1 +0,0 @@
-ccb.pcircle.ai
\ No newline at end of file
diff --git a/docs/COMMANDS.md b/docs/COMMANDS.md
deleted file mode 100644
index 63bf898f..00000000
--- a/docs/COMMANDS.md
+++ /dev/null
@@ -1,509 +0,0 @@
-# MeMesh Plugin Commands Reference
-
-Complete reference for all MeMesh Plugin commands and tools.
-
-## Table of Contents
-
-- [Buddy Commands](#buddy-commands) (User-Friendly Layer)
-- [MCP Tools](#mcp-tools) (Direct Tool Access)
-- [Command Aliases](#command-aliases)
-- [Usage Examples](#usage-examples)
-
----
-
-## Buddy Commands
-
-Buddy commands provide a natural, conversational interface to MeMesh's functionality.
-
-### `buddy do `
-
-Execute any development task with memory-enhanced context.
-
-**What it does:**
-- Analyzes task complexity
-- Applies capability-focused prompt enhancement
-- Returns execution result with routing info
-
-**Parameters:**
-- `task` (required): Description of task to execute
-
-**Examples:**
-```bash
-buddy do setup authentication for the API
-buddy do refactor the user service
-buddy do fix the login bug we discussed
-buddy do write tests for auth.ts
-buddy do optimize this database query
-```
-
-**Aliases:**
-- `buddy help-with`
-- `buddy execute`
-- `buddy run`
-- `buddy task`
-
-**Routing Logic:**
-- MeMesh evaluates task complexity and capability keywords
-- Specialized prompt context is added for the detected capability focus
-- Estimated cost and complexity are included for transparency
-
----
-
-### `buddy remember [limit]`
-
-Recall project memory - past decisions, architecture choices, bug fixes, and patterns.
-
-**What it does:**
-- Searches knowledge graph for relevant memories
-- Returns past decisions and context
-- Helps maintain project consistency
-
-**Parameters:**
-- `query` (required): What to search for
-- `limit` (optional): Max number of results (1-50, default: 10)
-- `mode` (optional): Search mode — `keyword` (exact match), `semantic` (AI similarity), `hybrid` (both combined). Default: `hybrid`
-- `matchThreshold` (optional): Minimum match score (0-1). Higher values return fewer but more relevant results. Default: 0.3
-- `allProjects` (optional): Search across ALL projects, not just the current one. Default: false
-
-**Examples:**
-```bash
-buddy remember how we implemented authentication
-buddy remember api design decisions
-buddy remember database schema changes
-buddy remember why we chose TypeScript
-buddy remember authentication approach 10
-```
-
-**Aliases:**
-- `buddy recall`
-- `buddy retrieve`
-- `buddy search`
-- `buddy find`
-
-**Use Cases:**
-- Recall past architectural decisions
-- Remember why certain patterns were chosen
-- Review previous bug fixes
-- Maintain consistency across sessions
-
----
-
-### `buddy help [command]`
-
-Get help for all buddy commands or a specific command.
-
-**What it does:**
-- Shows command reference
-- Explains usage and parameters
-- Provides examples
-- Lists command aliases
-
-**Parameters:**
-- `command` (optional): Specific command to get help for
- - `do` - Help for buddy do
- - `remember` - Help for buddy remember
- - (Leave empty for all commands)
-
-**Examples:**
-```bash
-buddy help # Show all commands
-buddy help do # Help for buddy do
-buddy help remember # Help for buddy remember
-```
-
----
-
-## MCP Tools
-
-Direct MCP tool access (for advanced users or MCP integrations).
-
-### `buddy-do`
-
-MCP tool version of `buddy do` command.
-
-**Input Schema:**
-```json
-{
- "task": "string (required) - Task description"
-}
-```
-
-**Example:**
-```json
-{
- "task": "setup authentication for the API"
-}
-```
-
----
-
-### `buddy-remember`
-
-MCP tool version of `buddy remember` command.
-
-**Input Schema:**
-```json
-{
- "query": "string (required) - Search query",
- "limit": "number (optional) - Max results (1-50, default: 10)",
- "mode": "string (optional) - Search mode: 'semantic', 'keyword', 'hybrid' (default: 'hybrid')",
- "matchThreshold": "number (optional) - Minimum match score 0-1 (default: 0.3)",
- "allProjects": "boolean (optional) - Search all projects (default: false)"
-}
-```
-
-**Example:**
-```json
-{
- "query": "authentication approach",
- "limit": 10,
- "mode": "hybrid",
- "matchThreshold": 0.5
-}
-```
-
----
-
-### `buddy-help`
-
-MCP tool version of `buddy help` command.
-
-**Input Schema:**
-```json
-{
- "command": "string (optional) - Command to get help for"
-}
-```
-
-**Example:**
-```json
-{
- "command": "do"
-}
-```
-
----
-
-### `memesh-generate-tests`
-
-Automatically generate comprehensive test cases from specifications or source code using AI.
-
-**Aliases:** `generate-tests` (deprecated, will be removed in v3.0.0)
-
-**Input Schema:**
-```json
-{
- "specification": "string (optional) - Feature or function specification",
- "code": "string (optional) - Source code to generate tests for"
-}
-```
-
-**Note:** Must provide either `specification` or `code`.
-
----
-
-### `memesh-hook-tool-use`
-
-Internal hook event ingestion for workflow automation and memory tracking (auto-triggered by Claude Code hooks, do not call manually).
-
-**Aliases:** `hook-tool-use` (deprecated, will be removed in v3.0.0)
-
-**Input Schema:**
-```json
-{
- "toolName": "string (required) - Tool executed by Claude Code",
- "arguments": "object (optional) - Tool arguments payload",
- "success": "boolean (required) - Whether execution succeeded",
- "duration": "number (optional) - Duration in milliseconds",
- "tokensUsed": "number (optional) - Tokens used by tool call",
- "output": "string (optional) - Tool output"
-}
-```
-
----
-
-### `memesh-record-mistake`
-
-Record errors and mistakes for learning and prevention.
-
-**Aliases:** `buddy-record-mistake`
-
-**Input Schema:**
-```json
-{
- "action": "string (required) - What action the AI took",
- "errorType": "string (required) - Error classification (see values below)",
- "userCorrection": "string (required) - User's correction/feedback",
- "correctMethod": "string (required) - What should have been done instead",
- "impact": "string (required) - Impact of the mistake",
- "preventionMethod": "string (required) - How to prevent in future",
- "relatedRule": "string (optional) - Related rule/guideline",
- "context": "object (optional) - Additional context"
-}
-```
-
-**Error Types:**
-`procedure-violation`, `workflow-skip`, `assumption-error`, `validation-skip`, `responsibility-lack`, `firefighting`, `dependency-miss`, `integration-error`, `deployment-error`
-
-**Example:**
-```json
-{
- "action": "Used synchronous file read in async handler",
- "errorType": "assumption-error",
- "userCorrection": "This blocks the event loop",
- "correctMethod": "Use fs.promises.readFile() instead of fs.readFileSync()",
- "impact": "Server becomes unresponsive under load",
- "preventionMethod": "Always use async I/O in request handlers"
-}
-```
-
----
-
-### `memesh-create-entities`
-
-Create knowledge entities with explicit relationships for fine-grained control over the knowledge graph.
-
-**Input Schema:**
-```json
-{
- "entities": "array (required) - Array of entity objects",
- "entity.name": "string (required) - Unique entity name",
- "entity.entityType": "string (required) - Entity type",
- "entity.observations": "array (required) - Array of observation strings",
- "entity.tags": "array (optional) - Array of tag strings"
-}
-```
-
-**Example:**
-```json
-{
- "entities": [
- {
- "name": "PostgreSQL Database Choice",
- "entityType": "decision",
- "observations": [
- "Chose PostgreSQL over MySQL",
- "Better JSON support and performance"
- ],
- "tags": ["database", "postgresql"]
- }
- ]
-}
-```
-
----
-
-### `memesh-metrics`
-
-View session metrics and tool usage statistics.
-
-**Input Schema:**
-```json
-{
- "section": "string (optional) - Which metrics section: 'all' | 'session' | 'routing' | 'memory' (default: 'all')"
-}
-```
-
----
-
-## Cloud-Only Mode Limitations
-
-In **cloud-only mode** (when better-sqlite3 is unavailable but MEMESH_API_KEY is configured), certain tools are disabled because they require local SQLite storage.
-
-### ❌ Disabled Tools in Cloud-Only Mode
-
-| Tool | Status | Reason |
-|------|--------|--------|
-| `buddy-do` | ❌ Unavailable | Requires local Knowledge Graph for task context |
-| `buddy-remember` | ❌ Unavailable | Requires local memory search |
-| `memesh-create-entities` | ❌ Unavailable | Requires local Knowledge Graph |
-| `memesh-hook-tool-use` | ❌ Unavailable | Requires local memory tracking |
-| `memesh-record-mistake` | ❌ Unavailable | Requires local mistake storage |
-| `memesh-metrics` | ❌ Unavailable | Requires local metrics storage |
-
-### ✅ Available Tools in Cloud-Only Mode
-
-| Tool | Status | Notes |
-|------|--------|-------|
-| `buddy-help` | ✅ Works | Displays command help |
-| `memesh-generate-tests` | ✅ Works | AI-based test generation (stateless) |
-
-### Error Message Example
-
-When calling a disabled tool in cloud-only mode:
-
-```
-❌ Tool 'buddy-remember' is not available in cloud-only mode.
-
-This MCP server is running without local SQLite storage (better-sqlite3 unavailable).
-
-To use local memory tools:
-1. Install better-sqlite3: npm install better-sqlite3
-2. Restart the MCP server
-
-Local SQLite storage is required for memory features.
-```
-
-### Why Cloud-Only Mode Exists
-
-Cloud-only mode enables MeMesh to run in restricted environments (like Claude Desktop Cowork) where:
-- Native modules cannot compile (better-sqlite3, onnxruntime-node, sqlite-vec)
-- Filesystem is read-only
-- Node.js headers cannot be downloaded
-
-See [docs/COWORK_SUPPORT.md](./COWORK_SUPPORT.md) for complete details.
-
-### Future: Cloud-First Memory
-
-**Planned**: Full memory tool support in cloud-only mode through cloud-first memory architecture
-
-**Timeline**: Long-term (no ETA yet)
-
-**How**: Memory tools will proxy to cloud KG API instead of local SQLite
-
-**Related Issues**: #73, #76, #77
-
----
-
-## Command Aliases
-
-All buddy commands support multiple aliases for convenience:
-
-### `buddy do` Aliases
-- `buddy help-with `
-- `buddy execute `
-- `buddy run `
-- `buddy task `
-
-### `buddy remember` Aliases
-- `buddy recall `
-- `buddy retrieve `
-- `buddy search `
-- `buddy find `
-
-## Usage Examples
-
-### Complete Development Workflow
-
-**1. Start new feature:**
-```bash
-buddy do create user registration API endpoint
-```
-
-**2. Check memory for past patterns:**
-```bash
-buddy remember how we implemented other API endpoints
-buddy remember authentication patterns
-```
-
-**3. Execute specific tasks:**
-```bash
-buddy do write tests for registration endpoint
-buddy do add input validation
-buddy do update API documentation
-```
-
-### Memory-Driven Development
-
-**Store important decisions:**
-```bash
-# The system automatically stores important decisions
-# from your conversations and task executions
-```
-
-**Recall when needed:**
-```bash
-buddy remember why we chose JWT over sessions
-buddy remember database migration approach
-buddy remember error handling patterns
-buddy remember API versioning strategy
-```
-
----
-
-## Tips & Best Practices
-
-### 1. Be Specific in Tasks
-```bash
-# ❌ Vague
-buddy do fix bug
-
-# ✅ Specific
-buddy do fix the login timeout bug in auth.ts
-```
-
-### 2. Use Memory Effectively
-```bash
-# Store context at the start of a session
-buddy remember what we're working on
-
-# Recall past decisions before making changes
-buddy remember authentication approach
-```
-
-### 3. Use Aliases for Speed
-```bash
-# Short aliases for common operations
-buddy recall auth # instead of buddy remember auth
-buddy run tests # instead of buddy do run tests
-```
-
----
-
-## Troubleshooting
-
-### Command Not Found
-
-If buddy commands aren't recognized:
-
-1. **Verify MCP Integration:**
- ```bash
- # MeMesh MCP is auto-managed via the plugin's .mcp.json
- # If auto-configuration failed, run:
- memesh setup
- ```
-
-2. **Restart Claude Code:**
- ```bash
- # After configuration changes
- ```
-
-3. **Check Server Logs:**
- ```bash
- # Look for MCP server errors
- ```
-
-### Tool Execution Failures
-
-If tools fail to execute:
-
-1. **Check Dependencies:**
- ```bash
- npm install
- npm run build
- ```
-
-2. **Verify Environment:**
- - Node.js 20+ installed
- - Claude Code configured
- - MCP server running
-
-3. **Test Manually:**
- ```bash
- npm run mcp
- ```
-
----
-
-## Next Steps
-
-- **API Reference:** See [API_REFERENCE.md](./api/API_REFERENCE.md) for detailed MCP tool documentation
-- **User Guide:** See [USER_GUIDE.md](./USER_GUIDE.md) for complete feature reference
-- **Best Practices:** See [BEST_PRACTICES.md](./BEST_PRACTICES.md) for effective workflows
-- **Installation:** See [GETTING_STARTED.md](./GETTING_STARTED.md) for setup guide
-- **Troubleshooting:** See [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) for common issues
-- **Contributing:** See [CONTRIBUTING.md](./CONTRIBUTING.md) to add features
-
----
-
-**Questions?** Open an issue on GitHub or check the documentation.
diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md
deleted file mode 100644
index d38e3607..00000000
--- a/docs/DEVELOPMENT.md
+++ /dev/null
@@ -1,609 +0,0 @@
-# Development Guide
-
-This guide covers the day-to-day development workflow for MeMesh Plugin contributors. For contribution guidelines and release processes, see [CONTRIBUTING.md](../CONTRIBUTING.md).
-
-## Table of Contents
-
-- [Prerequisites](#prerequisites)
-- [Initial Setup](#initial-setup)
-- [Development Workflow](#development-workflow)
-- [Testing Strategy](#testing-strategy)
-- [Debugging](#debugging)
-- [Common Tasks](#common-tasks)
-- [Troubleshooting](#troubleshooting)
-- [Best Practices](#best-practices)
-
----
-
-## Prerequisites
-
-### Required Software
-
-- **Node.js** >= 20.0.0
- ```bash
- node --version # Should be >= 20.0.0
- ```
-
-- **npm** >= 9.0.0
- ```bash
- npm --version
- ```
-
-- **Claude Code CLI** (for MCP server testing)
- ```bash
- claude --version
- ```
-
-- **Git**
- ```bash
- git --version
- ```
-
-### Optional Tools
-
-- **tsx** - TypeScript executor (automatically installed as dev dependency)
-- **vitest** - Testing framework (automatically installed)
-
----
-
-## Initial Setup
-
-### 1. Clone and Install
-
-```bash
-# Clone the repository
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-
-# Install dependencies
-npm install
-
-# Build the project
-npm run build
-
-# Verify installation
-npm test
-```
-
-### 2. Verify MCP Server
-
-```bash
-# Check if MCP server can start
-npm run verify:mcp
-
-# List MCP processes (if any)
-npm run processes:list
-```
-
-Expected output:
-```
-✅ MCP server verified successfully
-```
-
-### 3. Install Locally (Optional)
-
-For testing the global install experience:
-
-```bash
-# Link package globally
-npm link
-
-# Verify global installation
-memesh --version
-
-# Unlink when done
-npm unlink -g @pcircle/memesh
-```
-
----
-
-## Development Workflow
-
-### Starting Development
-
-```bash
-# Watch mode - auto-rebuild on file changes
-npm run dev
-
-# In a separate terminal, test your changes
-npm test -- --watch
-```
-
-### Making Changes
-
-1. **Create a feature branch**
- ```bash
- git checkout -b feature/your-feature-name
- ```
-
-2. **Make your changes**
- - Edit source files in `src/`
- - Add/update tests in `tests/`
-
-3. **Build and test**
- ```bash
- npm run build
- npm test
- ```
-
-4. **Verify code quality**
- ```bash
- npm run lint
- npm run typecheck
- npm run format
- ```
-
-### Understanding the Build Process
-
-```bash
-npm run build
-```
-
-This command:
-1. Compiles TypeScript → JavaScript (`tsc`)
-2. Copies resources to `dist/` (`copy:resources`)
-3. Makes MCP server executable (`chmod +x`)
-4. Prepares plugin package (`prepare:plugin`)
-
-**Important**: The MCP server runs as a daemon. After building, you must restart Claude Code to load the new version.
-
----
-
-## Testing Strategy
-
-### Test Types
-
-| Test Type | Command | When to Use |
-|-----------|---------|-------------|
-| **Unit Tests** | `npm test` | Default - fast, isolated tests |
-| **Integration Tests** | `npm run test:integration` | Test component interactions |
-| **E2E Tests** | `npm run test:e2e:safe` | ⚠️ Resource-intensive, use sparingly |
-| **Coverage Report** | `npm run test:coverage` | Before PR submission |
-
-### Running Tests
-
-```bash
-# Run all tests (single-thread mode to prevent worker leaks)
-npm test
-
-# Run specific test file
-npm test src/memory/MemoryManager.test.ts
-
-# Run tests matching a pattern
-npm test -- --grep "MemoryManager"
-
-# Watch mode for TDD
-npm test -- --watch
-
-# Generate coverage report
-npm run test:coverage
-```
-
-### Test Coverage Requirements
-
-- **Minimum for existing code**: 60%
-- **Minimum for new code**: 70%
-- **Target for core modules**: 80%+
-
-Check current coverage:
-```bash
-npm run test:coverage
-```
-
-### E2E Testing (⚠️ Important)
-
-E2E tests are resource-intensive and can freeze your system if not managed properly.
-
-**Always use the safe wrapper:**
-```bash
-npm run test:e2e:safe
-```
-
-**Never run:**
-```bash
-npm run test:e2e # ❌ Will exit with warning
-```
-
-The `test:e2e:safe` script:
-- Monitors system resources (CPU, memory)
-- Runs tests with `maxThreads: 1`
-- Aborts if resource usage exceeds safe thresholds
-
----
-
-## Debugging
-
-### MCP Server Debugging
-
-The MCP server runs as a persistent daemon process. Understanding its lifecycle is crucial for debugging.
-
-#### Check MCP Server Status
-
-```bash
-# List all MCP server processes
-npm run processes:list
-
-# Check for orphaned processes
-npm run processes:orphaned
-
-# View MCP configuration
-npm run processes:config
-```
-
-Example output:
-```
-┌─────────────────────────────────────────────────────────┐
-│ MCP Server Process Status │
-├──────┬──────────────┬────────┬──────────┬──────────────┤
-│ PID │ Command │ Status │ Uptime │ TTY │
-├──────┼──────────────┼────────┼──────────┼──────────────┤
-│ 1234 │ server-boot… │ Active │ 2h 15m │ ttys002 │
-└──────┴──────────────┴────────┴──────────┴──────────────┘
-```
-
-#### Common MCP Server Issues
-
-**Problem**: MCP tools not loading in Claude Code
-
-**Diagnosis**:
-```bash
-# 1. Check if old process is running
-npm run processes:list
-
-# 2. Check global vs local version mismatch
-npm list -g @pcircle/memesh # Global version
-cat package.json | grep version # Local version
-```
-
-**Solution**:
-```bash
-# 1. Kill old MCP server processes
-npm run processes:kill
-
-# 2. Restart Claude Code
-# Exit Claude Code completely and restart
-
-# 3. Verify tools are now available
-# In Claude Code, type: buddy-help
-```
-
-**Root Cause**: MCP server runs as a daemon and doesn't auto-reload when code changes. Old processes keep running with outdated code.
-
-**Prevention**:
-- Always restart Claude Code after rebuilding
-- Kill old processes before testing: `npm run processes:kill`
-
-#### Debugging with Node Inspector
-
-Start MCP server with debugger enabled and open Chrome DevTools at chrome://inspect
-
-```bash
-node --inspect dist/mcp/server-bootstrap.js
-```
-
-#### Viewing Logs
-
-MCP server logs are written to `~/.cache/claude/mcp-logs/`
-
-```bash
-# View recent logs
-tail -f ~/.cache/claude/mcp-logs/memesh.log
-```
-
-### Memory System Debugging
-
-```bash
-# Check database schema
-sqlite3 ~/.cache/claude/memesh/memory.db ".schema"
-
-# List memories
-sqlite3 ~/.cache/claude/memesh/memory.db "SELECT * FROM memories LIMIT 10;"
-
-# Check database size
-du -sh ~/.cache/claude/memesh/
-```
-
-### TypeScript Debugging
-
-```bash
-# Type check without building
-npm run typecheck
-
-# Show all type errors
-tsc --noEmit --pretty false
-
-# Check specific file
-tsc --noEmit src/your-file.ts
-```
-
----
-
-## Common Tasks
-
-### Adding a New MCP Tool
-
-1. **Define the tool** in `src/mcp/ToolDefinitions.ts`
-2. **Create the handler** in `src/mcp/handlers/`
-3. **Add Zod schema** in `src/mcp/schemas/`
-4. **Write tests** in `tests/unit/mcp/`
-5. **Update documentation** (`docs/COMMANDS.md`, `README.md`)
-6. **Test end-to-end**:
- ```bash
- npm run build
- npm run verify:mcp
- # Restart Claude Code
- # Test: buddy-your-tool "test input"
- ```
-
-### Modifying Database Schema
-
-1. **Create migration** in `src/db/migrations/`
-2. **Update version** in `src/db/schema.ts`
-3. **Verify migration**: `npm run verify:migration`
-4. **Test**: `npm test -- src/db/migrations`
-
-### Updating Dependencies
-
-```bash
-# Check outdated dependencies
-npm outdated
-
-# Update specific dependency
-npm update package-name
-
-# After updating, verify:
-npm run build
-npm test
-npm run test:install
-```
-
-### Creating a Release
-
-See [docs/RELEASE_PROCESS.md](RELEASE_PROCESS.md) for the full release workflow.
-
-Quick checklist:
-```bash
-# 1. Bump version (on develop branch)
-npm version patch --no-git-tag-version # or minor, or major
-
-# 2. Update CHANGELOG.md
-# Add entry for new version
-
-# 3. Commit and push
-git add package.json CHANGELOG.md
-git commit -m "chore(release): bump version to X.Y.Z"
-git push origin develop
-
-# 4. Open PR: develop → main, review & merge
-gh pr create --base main --head develop --title "chore(release): vX.Y.Z"
-
-# 5. Manual npm publish (GitHub Actions cannot auto-trigger publish)
-npm publish --access public
-npm view @pcircle/memesh version
-```
-
----
-
-## Troubleshooting
-
-### Build Errors
-
-**Problem**: `tsc` fails with type errors
-
-**Solution**:
-```bash
-# Clear TypeScript cache
-rm -rf dist/
-rm -rf node_modules/.cache/
-
-# Reinstall dependencies
-npm ci
-
-# Rebuild
-npm run build
-```
-
----
-
-**Problem**: Permission denied errors
-
-**Solution**:
-```bash
-# Make scripts executable
-chmod +x scripts/*.sh
-
-# Rebuild
-npm run build
-```
-
----
-
-### Test Failures
-
-**Problem**: Tests fail after making changes
-
-**Solution**:
-```bash
-# Clear test cache
-npm test -- --clearCache
-
-# Run tests again
-npm test
-```
-
----
-
-**Problem**: E2E tests freeze the system
-
-**Solution**:
-- Always use `npm run test:e2e:safe`
-- Never run multiple E2E test processes simultaneously
-- If system freezes, kill vitest processes manually
-
----
-
-### Installation Issues
-
-**Problem**: `npm install` fails
-
-**Solution**:
-```bash
-# Clear npm cache
-npm cache clean --force
-
-# Remove node_modules
-rm -rf node_modules package-lock.json
-
-# Reinstall
-npm install
-```
-
----
-
-**Problem**: Postinstall script fails
-
-**Solution**:
-```bash
-# Run postinstall manually
-node scripts/postinstall-new.js
-
-# Check logs
-cat ~/.cache/claude/memesh/install.log
-```
-
----
-
-### MCP Server Issues
-
-**Problem**: Tools not available in Claude Code
-
-**Solution**:
-```bash
-# 1. Kill old processes
-npm run processes:kill
-
-# 2. Verify installation
-npm list -g @pcircle/memesh
-
-# 3. Reinstall if needed
-npm install -g @pcircle/memesh
-
-# 4. Restart Claude Code completely
-```
-
----
-
-**Problem**: MCP server crashes on startup
-
-**Solution**:
-```bash
-# Check logs
-tail -f ~/.cache/claude/mcp-logs/memesh.log
-
-# Verify server can start
-npm run verify:mcp
-
-# If verification fails, check:
-# 1. Node.js version >= 20
-# 2. Database file is not corrupted
-# 3. No other process using the same port
-```
-
----
-
-## Best Practices
-
-### Code Quality
-
-1. **Use strict TypeScript**
- - Avoid `any` types
- - Define explicit interfaces
- - Use type guards for runtime checks
-
-2. **Write tests for new code**
- - Minimum 70% coverage for new files
- - Test both success and error paths
- - Use descriptive test names
-
-3. **Follow existing patterns**
- - Check how similar features are implemented
- - Maintain consistent code style
- - Reuse existing utilities
-
-### Git Workflow
-
-1. **Small, focused commits**
- - One logical change per commit
- - Use conventional commit messages
- - Reference issue numbers when applicable
-
-2. **Keep branches up to date**
- ```bash
- git checkout main
- git pull
- git checkout feature/your-branch
- git rebase main
- ```
-
-3. **Run pre-commit checks**
- ```bash
- npm run lint
- npm run typecheck
- npm test
- ```
-
-### Performance
-
-1. **Avoid blocking operations**
- - Use async/await for I/O
- - Don't block the event loop
-
-2. **Profile before optimizing**
- - Use Node.js built-in profiler
- - Measure before and after changes
-
-3. **Monitor memory usage**
- - Check for memory leaks
- - Use appropriate data structures
-
-### Security
-
-1. **Never commit sensitive data**
- - Use environment variables
- - Add sensitive files to `.gitignore`
-
-2. **Validate user input**
- - Use Zod schemas for validation
- - Sanitize inputs before database queries
-
-3. **Use secure dependencies**
- ```bash
- # Audit dependencies
- npm audit
-
- # Fix vulnerabilities
- npm audit fix
- ```
-
----
-
-## Additional Resources
-
-- **[Architecture Documentation](ARCHITECTURE.md)** - System design and components
-- **API Reference** - Run `npm run build` to generate API docs locally
-- **[User Guide](USER_GUIDE.md)** - End-user documentation
-- **[Troubleshooting Guide](TROUBLESHOOTING.md)** - Common issues and solutions
-- **[MCP Specification](https://modelcontextprotocol.io)** - MCP protocol details
-
----
-
-## Getting Help
-
-- **Discussions**: [GitHub Discussions](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions)
-- **Issues**: [GitHub Issues](https://github.com/PCIRCLE-AI/claude-code-buddy/issues)
-
----
-
-**Happy coding!** 🚀
-
-If you find something unclear in this guide, please [open an issue](https://github.com/PCIRCLE-AI/claude-code-buddy/issues/new?labels=documentation) or submit a PR to improve it.
diff --git a/docs/ERROR_REFERENCE.md b/docs/ERROR_REFERENCE.md
deleted file mode 100644
index b1804892..00000000
--- a/docs/ERROR_REFERENCE.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Error Reference
-
-Complete reference of MeMesh Plugin error types, codes, and solutions.
-
----
-
-## Error Hierarchy
-
-All MeMesh errors extend `BaseError` and include:
-- **`code`**: Programmatic error code (see table below)
-- **`context`**: Structured metadata about the error
-- **`timestamp`**: When the error occurred
-
-```
-BaseError
-├── ValidationError (input validation failures)
-├── StateError (invalid state for operation)
-├── NotFoundError (resource not found)
-├── ConfigurationError (configuration issues)
-├── OperationError (operation execution failures)
-└── ExternalServiceError (external API/service failures)
-```
-
----
-
-## Error Codes
-
-### Validation Errors (VALIDATION_*)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `VALIDATION_FAILED` | `ValidationError` | "Memory content cannot be empty" | Empty or missing required field | Provide non-empty value for required fields |
-| `VALIDATION_FAILED` | `ValidationError` | "Metrics store path must be within user home or data directory" | Path traversal attempt | Use paths within `~/.memesh/` or home directory |
-| `VALIDATION_FAILED` | `ValidationError` | "Limit must be non-negative" | Negative number for pagination | Use positive integers for `limit` and `offset` |
-| `VALIDATION_FAILED` | `ValidationError` | "Database path must be a non-empty string" | Empty database path | Provide a valid `.db` file path |
-| `VALIDATION_FAILED` | `ValidationError` | "Database path contains invalid null byte" | Null byte injection attempt | Remove null bytes from path |
-| `VALIDATION_FAILED` | `ValidationError` | "Database path must have .db extension" | Wrong file extension | Use `.db` extension for database files |
-| `VALIDATION_FAILED` | `ValidationError` | "Database path must be within allowed directory" | Path outside allowed scope | Use paths within `~/.memesh/` |
-| `VALIDATION_FAILED` | `ValidationError` | "CPU percentage must be between 0 and 100" | Out-of-range resource value | Use values within documented ranges |
-| `VALIDATION_FAILED` | `ValidationError` | "Max background agents must be >= 1" | Invalid agent limit | Set at least 1 for max agents |
-| `VALIDATION_FAILED` | `ValidationError` | "Memory ID must start with prefix: mem_" | Invalid ID format | Use IDs with `mem_` prefix |
-| `VALIDATION_FAILED` | `ValidationError` | "Invalid memory type" | Unsupported memory type | Use: `decision`, `feature`, `bug_fix`, `lesson_learned`, `pattern`, `note` |
-| `VALIDATION_FAILED` | `ValidationError` | "Command is required" | Empty tool command | Provide command string |
-| `VALIDATION_FAILED` | `ValidationError` | "CSRF protection: Missing origin header" | Missing security header | Ensure Origin header is present |
-| `VALIDATION_FAILED` | `ValidationError` | "Agent name must be a non-empty string" | Empty agent name | Provide non-empty agent name |
-
-### State Errors (INVALID_STATE)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `INVALID_STATE` | `StateError` | "Memory system not initialized" | Using system before init | Call `initialize()` before operations |
-| `ALREADY_INITIALIZED` | `StateError` | "System already initialized" | Double initialization | Check state before calling init |
-
-### Not Found Errors (RESOURCE_NOT_FOUND)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `RESOURCE_NOT_FOUND` | `NotFoundError` | "Task not found" | Invalid task ID | Verify task ID with `memesh-metrics` |
-| `ENTITY_NOT_FOUND` | `NotFoundError` | "Entity not found" | Knowledge graph entity missing | Use `buddy-remember` to search first |
-| `TOOL_NOT_FOUND` | `NotFoundError` | "Tool not found" | Invalid MCP tool name | Check available tools with `buddy-help` |
-
-### Configuration Errors (CONFIGURATION_*)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `CONFIGURATION_INVALID` | `ConfigurationError` | "API key is required" | Missing API key | Run `memesh login` or set `ANTHROPIC_API_KEY` |
-| `CONFIGURATION_MISSING` | `ConfigurationError` | "Configuration file not found" | Missing config | Run `memesh setup` to create config |
-
-### Operation Errors (OPERATION_*)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `OPERATION_FAILED` | `OperationError` | "Memory update failed" | Database write failure | Check disk space, file permissions |
-| `OPERATION_FAILED` | `OperationError` | "Failed to create UnifiedMemoryStore" | Store initialization error | Check database file integrity |
-| `OPERATION_FAILED` | `OperationError` | "Failed to store memory" | Write operation failed | Verify database is not locked |
-| `INITIALIZATION_FAILED` | `OperationError` | "Tool dispatcher is not attached" | Missing component wiring | Restart MeMesh server |
-| `EXECUTION_FAILED` | `OperationError` | "Tool execution failed" | Runtime tool error | Check tool input parameters |
-
-### External Service Errors (EXTERNAL_*)
-
-| Code | Error Class | Common Message | Cause | Solution |
-|------|-------------|----------------|-------|----------|
-| `EXTERNAL_SERVICE_ERROR` | `ExternalServiceError` | "External API request failed" | API call failed | Check network, API key validity |
-| `NETWORK_ERROR` | `ExternalServiceError` | "Network request timed out" | Connection timeout | Retry, check connectivity |
-| `TIMEOUT_ERROR` | `ExternalServiceError` | "Operation timed out" | Exceeded time limit | Increase timeout or reduce workload |
-
----
-
-## SQLite-Specific Errors
-
-These are native SQLite errors caught and wrapped by MeMesh:
-
-| SQLite Error | MeMesh Handling | Cause | Solution |
-|-------------|-----------------|-------|----------|
-| `SQLITE_BUSY` | Retried with exponential backoff | Database locked by another process | Wait or kill other MeMesh processes: `npm run processes:kill` |
-| `SQLITE_FULL` | Connection discarded from pool | Disk full | Free disk space, check `~/.memesh/` size |
-| `SQLITE_CORRUPT` | Detected by `PRAGMA quick_check` | Database corruption | Delete `~/.memesh/database.db` and restart (data loss) |
-| `SQLITE_READONLY` | `OperationError` thrown | File permissions | Fix: `chmod 644 ~/.memesh/database.db` |
-| `SQLITE_IOERR` | Connection discarded | I/O failure | Check disk health, file system |
-
----
-
-## Programmatic Error Handling
-
-```typescript
-import { isValidationError, isOperationError, getErrorCode, ErrorCode } from '@pcircle/memesh';
-
-try {
- await store.createMemory(data);
-} catch (error) {
- if (isValidationError(error)) {
- // Input validation failed — fix input
- console.log('Invalid field:', error.context);
- } else if (isOperationError(error)) {
- // Operation failed — retry or report
- console.log('Operation:', error.context?.operation);
- }
-
- // Or use error codes
- const code = getErrorCode(error);
- if (code === ErrorCode.VALIDATION_FAILED) { /* ... */ }
-}
-```
-
----
-
-## Type Guards
-
-| Function | Returns `true` for |
-|----------|-------------------|
-| `isBaseError(err)` | Any MeMesh custom error |
-| `isValidationError(err)` | Input validation errors |
-| `isStateError(err)` | State-related errors |
-| `isNotFoundError(err)` | Resource not found errors |
-| `isConfigurationError(err)` | Configuration errors |
-| `isOperationError(err)` | Operation execution errors |
-| `isExternalServiceError(err)` | External service errors |
-
----
-
diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md
deleted file mode 100644
index bc5602b6..00000000
--- a/docs/GETTING_STARTED.md
+++ /dev/null
@@ -1,370 +0,0 @@
-# Getting Started with MeMesh Plugin
-
-**Install Time**: 2 minutes • **First Memory**: 30 seconds • **Ready to Code**: Immediately
-
----
-
-## What is MeMesh Plugin?
-
-**MeMesh Plugin gives Claude Code a persistent memory.** Simple as that.
-
-Every project you work on, every decision you make, every bug you fix—Claude remembers it all. No more re-explaining your architecture every session. No more "As I mentioned earlier..." No more starting from scratch.
-
-### What You Get
-
-✅ **Remembers Everything**: Architecture decisions, design patterns, bugs fixed, conventions used
-✅ **Smart Task Routing**: Automatically detects what kind of work you need and applies the right expertise
-✅ **Persistent Memory**: Knowledge graph persists across all sessions (stored locally)
-✅ **Zero Configuration**: Works out of the box with sensible defaults
-✅ **100% Local**: Your code and decisions never leave your machine
-
-### Before and After
-
-**❌ Without MeMesh:**
-```
-Session 1: "We use JWT for auth..."
-Session 2: "Remember our auth?"
-Claude: "Sorry, no context..."
-You: *explains everything again* 😤
-```
-
-**✅ With MeMesh:**
-```
-Session 1: "Setup JWT auth"
-Session 2: "Remember auth"
-MeMesh: "JWT auth from Jan 15: 15min access tokens, 7-day refresh..."
-```
-
----
-
-## Installation
-
-### Quick Install (Recommended)
-
-One command, fully automatic:
-
-```bash
-npm install -g @pcircle/memesh
-```
-
-Restart Claude Code. **Done!**
-
-No config files to edit. No API keys needed. The postinstall script auto-configures everything.
-
-
-Install from source (for contributors)
-
-```bash
-git clone https://github.com/PCIRCLE-AI/claude-code-buddy.git
-cd claude-code-buddy
-npm install && npm run build
-```
-
-MeMesh is a Claude Code Plugin — the MCP server is auto-managed via the plugin's `.mcp.json` file. No manual configuration of `~/.claude/mcp_settings.json` is needed.
-
-If auto-configuration did not work, run:
-```bash
-memesh setup
-```
-
-
-
----
-
-## Verify It's Working
-
-### 1. Check MeMesh is Active
-
-In Claude Code, run:
-```bash
-buddy-help
-```
-
-**Expected output:**
-```
-MeMesh v2.10.1 - Persistent Memory for Claude Code
-
-Available Commands:
- buddy-do - Execute any development task
- buddy-remember - Recall project decisions and patterns
- buddy-help - Show this help
-
- memesh-generate-tests - Auto-generate comprehensive tests
-
- ... (8 tools total)
-
-Status: ✅ Connected (8 tools available)
-```
-
-### 2. Test Memory Storage
-
-Try this:
-```bash
-"Store this decision: We're using PostgreSQL for JSONB support"
-```
-
-Then recall it:
-```bash
-buddy-remember "database choice"
-```
-
-**You should see:** Your stored decision retrieved from memory.
-
-### 3. Test Task Routing
-
-```bash
-buddy-do "review this function for security issues:
-function login(user, pass) {
- return db.query('SELECT * FROM users WHERE name=' + user);
-}"
-```
-
-**You should see:** Security analysis identifying SQL injection vulnerability.
-
----
-
-## Troubleshooting
-
-
-❌ "MCP server not found"
-
-**Cause**: Claude Code can't connect to MeMesh
-
-**Fix**:
-1. Verify installation path is correct:
- ```bash
- cd claude-code-buddy
- pwd # Copy this path
- ```
-
-2. Check the file exists:
- ```bash
- ls dist/mcp/server-bootstrap.js
- ```
-
-3. Rebuild if missing:
- ```bash
- npm run build
- ```
-
-4. Restart Claude Code completely
-
-
-
-
-❌ "Commands don't respond"
-
-**Cause**: MeMesh server isn't running or crashed
-
-**Fix**:
-1. Check server logs:
- ```bash
- cat ~/.claude/logs/memesh.log
- ```
-
-2. Verify Node.js version:
- ```bash
- node --version # Should be v20.0.0+
- ```
-
-3. Reinstall dependencies:
- ```bash
- cd claude-code-buddy
- rm -rf node_modules
- npm install
- npm run build
- ```
-
-4. Restart Claude Code
-
-
-
-
-❌ "Installation succeeded but nothing happens"
-
-**Cause**: Plugin directory not loaded
-
-**Fix**:
-1. Start Claude Code with plugin flag:
- ```bash
- claude --plugin-dir /path/to/claude-code-buddy
- ```
-
-2. Create shell alias for convenience:
- ```bash
- # Add to ~/.bashrc or ~/.zshrc
- alias claude-mem='claude --plugin-dir /path/to/claude-code-buddy'
- ```
-
-3. For team distribution, see Claude Code plugin documentation
-
-
-
----
-
-## Your First Commands
-
-### Store and Retrieve
-
-**Store a decision:**
-```bash
-"Remember: We chose React over Vue because the team knows React better"
-```
-
-**Retrieve it later:**
-```bash
-buddy-remember "why React?"
-```
-
-**Expected**: Instant recall of your decision with the reasoning.
-
-### Smart Task Routing
-
-**Code review:**
-```bash
-buddy-do "review src/auth.ts for security"
-```
-
-**Generate tests:**
-```bash
-memesh-generate-tests --code "function add(a, b) { return a + b; }"
-```
-
-**Refactor code:**
-```bash
-buddy-do "refactor UserService to reduce duplication"
-```
-
-**Each command automatically routes to the right internal capability with the right expertise.**
-
----
-
-## Common Scenarios
-
-### Scenario 1: Joining an Existing Project
-
-**You**: New to the project, need to understand decisions
-
-```bash
-# Query project memory
-buddy-remember "architecture decisions"
-buddy-remember "why PostgreSQL"
-buddy-remember "authentication approach"
-
-# Get overview
-buddy-do "summarize the key technical decisions in this project"
-```
-
-**Result**: Instant context on why things are the way they are.
-
----
-
-### Scenario 2: Starting a New Project
-
-**You**: Fresh start, want to track decisions from day one
-
-```bash
-# Make initial decisions and store them
-"Remember: Using TypeScript strict mode for type safety"
-"Remember: Chose Express over Fastify for ecosystem maturity"
-"Remember: Using Vitest for testing because it's fast and ESM-native"
-
-# Later in the project
-buddy-remember "testing framework"
-```
-
-**Result**: Your decisions are preserved across sessions (30/90 days). New team members can query them months later.
-
----
-
-### Scenario 3: Debugging a Recurring Issue
-
-**You**: Bug keeps coming back, want to track the fix
-
-```bash
-# Debug and document
-buddy-do "analyze why users are logged out randomly"
-
-# After fixing
-"Remember: Random logout bug was caused by JWT expiry not being refreshed.
-Fixed in auth/middleware.ts by adding token refresh logic."
-
-# Next time it happens
-buddy-remember "logout bug"
-```
-
-**Result**: Root cause analysis is stored. If the bug returns, you have the full history.
-
----
-
-## Next Steps
-
-Now that MeMesh is running, explore more:
-
-### Learn More
-- **[User Guide](USER_GUIDE.md)** - Complete feature documentation
-- **[API Reference](api/API_REFERENCE.md)** - All commands and tools explained
-- **[Troubleshooting](TROUBLESHOOTING.md)** - Common issues and solutions
-
-### Advanced Topics
-- Knowledge graph queries and entity management
-- Memory best practices and organization
-- Error tracking and mistake prevention
-
----
-
-## Quick Reference Card
-
-```bash
-# Core Commands
-buddy-help # Show available commands
-buddy-do "" # Execute any development task
-buddy-remember "" # Query project memory
-
-# Testing
-memesh-generate-tests # Auto-generate tests (provide specification or code)
-
-# Status
-"Show MeMesh status" # Connection and capability status
-```
-
----
-
-## Why It Works
-
-**MeMesh operates on three principles:**
-
-1. **Memory First**: Every significant action is recorded automatically
-2. **Context-Aware**: Tasks are enhanced with relevant project memory
-3. **Zero Friction**: No configuration, no API keys, no setup ceremony
-
-**The result**: Claude Code that remembers your project like a teammate, not a tool.
-
----
-
-## Learn More
-
-**Comprehensive Documentation**:
-
-- **[User Guide](./USER_GUIDE.md)** - Complete feature reference and workflows
-- **[API Reference](./api/API_REFERENCE.md)** - Detailed MCP tool documentation with schemas and examples
-- **[Best Practices](./BEST_PRACTICES.md)** - Effective patterns and workflows
-- **[Commands Reference](./COMMANDS.md)** - All available commands
-- **[Troubleshooting](./TROUBLESHOOTING.md)** - Common issues and solutions
-
-**Advanced Topics**:
-- [Knowledge Graph Tools](./api/API_REFERENCE.md#knowledge-graph-tools) - Build complex entity relationships
-- [Integration Examples](./api/API_REFERENCE.md#integration-examples) - Complete workflow patterns
-- [Error Reference](./api/API_REFERENCE.md#error-reference) - Error codes and handling
-
----
-
-**Ready to code with memory?** 🚀
-
-Install MeMesh, run `buddy-help`, and never explain your architecture twice again.
-
-**Questions?** [Open an issue](https://github.com/PCIRCLE-AI/claude-code-buddy/issues) or [start a discussion](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions).
-
----
-
-**Not affiliated with Anthropic PBC** • Independent open-source project
diff --git a/docs/TROUBLESHOOTING.md b/docs/TROUBLESHOOTING.md
deleted file mode 100644
index 692989f0..00000000
--- a/docs/TROUBLESHOOTING.md
+++ /dev/null
@@ -1,166 +0,0 @@
-# Troubleshooting Guide
-
-## TL;DR
-If you're in a hurry:
-1. Run `memesh setup` to fix installation and path issues.
-2. Restart Claude Code completely.
-3. Verify your Node.js version is **>= v20.0.0**.
-
----
-
-## Diagnostic Flowchart
-
-Use this decision tree to quickly find your solution:
-
-```
-Start: What's the problem?
-│
-├─ "Tools not found / command not found"
-│ ├─ Is MeMesh installed? → npm list -g @pcircle/memesh
-│ │ ├─ NOT installed → npm install -g @pcircle/memesh
-│ │ └─ Installed → Run: memesh setup → Restart Claude Code
-│ └─ Still failing? → Check Node.js >= v20: node --version
-│
-├─ "MCP Server Connection Failed"
-│ ├─ Restart Claude Code completely (quit + reopen)
-│ ├─ Still failing? → memesh config validate
-│ │ ├─ Config invalid → memesh setup
-│ │ └─ Config valid → Check for orphaned processes:
-│ │ npm run processes:kill → Restart Claude Code
-│ └─ Still failing? → Check daemon: memesh daemon status
-│
-├─ "Memory not persisting"
-│ ├─ Does database exist? → ls ~/.memesh/database.db
-│ │ ├─ Missing → memesh setup (re-creates it)
-│ │ └─ Exists → Check permissions: ls -la ~/.memesh/
-│ └─ Permissions OK? → memesh config validate
-│
-├─ "Slow or hanging"
-│ ├─ Kill processes → pkill -f memesh
-│ ├─ Check orphans → npm run processes:orphaned
-│ └─ Restart Claude Code
-│
-├─ "Permission denied"
-│ ├─ During install? → Use nvm, avoid sudo
-│ └─ During runtime? → chmod 700 ~/.memesh/ && chmod 644 ~/.memesh/database.db
-│
-└─ "Database corruption / SQLITE_CORRUPT"
- ├─ Backup: cp ~/.memesh/database.db ~/.memesh/database.db.bak
- ├─ Delete: rm ~/.memesh/database.db
- └─ Restart Claude Code (database re-created automatically)
-```
-
----
-
-## Quick Diagnostic Commands
-
-Before troubleshooting, run these commands to gather information:
-
-```bash
-# 1. Verify MeMesh installation
-npm list -g @pcircle/memesh
-
-# 2. Check Node.js and npm versions
-node --version # Should be >= v20.0.0
-npm --version # Should be >= v9.0.0
-
-# 3. Validate MCP configuration
-memesh config validate
-
-# 4. Check daemon status
-memesh daemon status
-
-# 5. Test MeMesh directly
-npx @pcircle/memesh --help
-```
-
----
-
-## Issue Categories
-
-- [Most Common Issues](#most-common-issues)
-- [Performance & Persistence](#performance--persistence)
-- [Error Reference](#error-reference)
-- [Getting Help](#getting-help)
-
----
-
-## Most Common Issues
-
-### 1. "buddy-help" command not found
-**Symptoms**: Shell returns "command not found" after installation.
-**Quick Fix:**
-```bash
-memesh setup # Run interactive setup to fix PATH
-# Restart your terminal or Claude Code
-# Try: buddy-help
-```
-
-### 2. "MCP Server Connection Failed"
-**Symptoms**: Claude Code cannot connect to the MeMesh server.
-**Quick Fix:**
-```bash
-# Restart Claude Code completely
-# Wait a few seconds for the MCP server to initialize
-# Try the command again
-```
-
-### 3. "Permission denied" errors
-**Symptoms**: Errors during `npm install` or file access.
-**Quick Fix:**
-- **Avoid sudo**: Use an npm prefix or a version manager like `nvm`.
-- **Manual fix**: `sudo npm install -g @pcircle/memesh` (not recommended for long-term).
-
----
-
-## Performance & Persistence
-
-### 4. Commands are slow or hanging
-**Symptoms**: MeMesh takes too long to respond or hangs indefinitely.
-**Quick Fix:**
-- **Kill processes**: `pkill -f memesh` then restart Claude Code.
-- **Simplify**: Break complex tasks into smaller sub-tasks.
-- **Check orphans**: `npm run processes:orphaned` to find orphaned processes.
-
-### 5. Memory not persisting
-**Symptoms**: Information or context from previous sessions is lost.
-**Quick Fix:**
-- Check permissions for `~/.memesh/` directory (or `~/.claude-code-buddy/` if migrating from older version).
-- Verify the database exists: `ls ~/.memesh/database.db`
-- Run `memesh config validate` to ensure storage is correctly configured.
-
-### 6. Multiple MCP server processes
-**Symptoms**: High CPU or memory usage from duplicate MeMesh processes.
-**Quick Fix:**
-```bash
-npm run processes:list # List all processes
-npm run processes:orphaned # Find orphaned processes
-npm run processes:kill # Kill all MeMesh processes
-```
-Then restart Claude Code.
-
----
-
----
-
-## Error Reference
-
-For a complete list of error codes, messages, and solutions, see **[ERROR_REFERENCE.md](./ERROR_REFERENCE.md)**.
-
-Common error types:
-- **ValidationError**: Invalid input — check parameters
-- **StateError**: System not ready — run `memesh setup`
-- **OperationError**: Operation failed — check disk space, permissions
-- **ConfigurationError**: Missing config — run `memesh config validate`
-
----
-
-## Getting Help
-
-1. **Quick Start:** [docs/QUICK_START.md](./QUICK_START.md)
-2. **CLI Parameters:** [docs/CLI_PARAMETERS.md](./CLI_PARAMETERS.md)
-3. **Error Reference:** [docs/ERROR_REFERENCE.md](./ERROR_REFERENCE.md)
-4. **Report Issue:** `memesh report-issue`
-
----
-
diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md
deleted file mode 100644
index 080a62d1..00000000
--- a/docs/USER_GUIDE.md
+++ /dev/null
@@ -1,1358 +0,0 @@
-# User Guide
-## Complete Reference for MeMesh Plugin
-
-Welcome to the complete MeMesh Plugin User Guide! This guide provides detailed information about all commands, features, and workflows.
-
----
-
-## Table of Contents
-
-1. [Introduction](#introduction)
-2. [Core Commands](#core-commands)
-3. [MCP Tools](#mcp-tools)
- - [Advanced MCP Tools](#advanced-mcp-tools)
- - [Learning & Error Tracking](#learning--error-tracking)
-4. [CLI Commands](#cli-commands)
-5. [Memory System](#memory-system)
-6. [Task Execution](#task-execution)
-7. [Configuration](#configuration)
-8. [Visual Explorer (Streamlit UI)](#visual-explorer-streamlit-ui)
-9. [Advanced Usage](#advanced-usage)
-10. [Troubleshooting](#troubleshooting)
-
----
-
-## Introduction
-
-### What is MeMesh Plugin?
-
-MeMesh Plugin is a persistent memory plugin for Claude Code that helps you:
-
-- **Remember across sessions**: Persistent knowledge graph storage
-- **Execute tasks with context**: Memory-enhanced task execution
-- **Learn from experience**: Auto-tracking and pattern recognition
-- **Work efficiently**: Context-aware assistance
-
-### Architecture Overview
-
-MeMesh Plugin runs as a local-first MCP server:
-
-```
-Claude Code ──stdio──► MeMesh Plugin
- │
- ┌─────────┼─────────┐
- ▼ ▼ ▼
- ┌─────────┐ ┌────────┐ ┌──────────┐
- │ Router │ │ Memory │ │ Semantic │
- │ │ │ Graph │ │ Search │
- └─────────┘ └────────┘ └──────────┘
-```
-
-**Benefits:**
-- Local-first: All data stored locally, never transmitted
-- Zero configuration: Works out of the box
-- Vector semantic search: ONNX embeddings for intelligent recall
-- Persistent memory: Knowledge graph persists across sessions
-
----
-
-## Core Commands
-
-### buddy-do
-
-**Purpose**: Execute tasks with memory-enhanced context
-
-**Syntax**:
-```
-buddy-do ""
-```
-
-**How it works**:
-1. Analyzes task complexity and required capabilities
-2. Routes to the best-suited capability (backend, frontend, devops, etc.)
-3. Enhances prompt with project context
-4. Returns routing decision and enhanced prompt
-
-**Examples**:
-
-```bash
-# Backend development
-buddy-do "implement user authentication with JWT"
-# → Routes to backend-developer capability
-# → Analyzes database requirements, security considerations
-# → Provides enhanced prompt with project context
-
-# Frontend development
-buddy-do "create responsive navbar with dark mode toggle"
-# → Routes to frontend-developer capability
-# → Considers existing design system
-# → Suggests component structure
-
-# DevOps tasks
-buddy-do "setup CI/CD pipeline with GitHub Actions"
-# → Routes to devops capability
-# → Analyzes deployment requirements
-# → Provides workflow configuration
-
-# Bug fixes
-buddy-do "fix memory leak in user service"
-# → Routes to debugging capability
-# → Analyzes potential causes
-# → Suggests investigation steps
-```
-
-**Task Metadata Extraction**:
-
-buddy-do automatically extracts metadata from your task description:
-
-- **Goal**: What you want to achieve
-- **Reason**: Why you're doing this (if mentioned)
-- **Expected Outcome**: What success looks like
-
-Example:
-```
-buddy-do "add email verification because users need to confirm accounts"
-
-Extracted:
-- Goal: "add email verification"
-- Reason: "users need to confirm accounts"
-- Expected Outcome: (inferred from context)
-```
-
-**Response Structure**:
-
-```
-✓ BUDDY-DO SUCCESS
-
-📋 Task
-Setup user authentication with JWT
-
-────────────────────────────────────
-
-✓ Results
- routing:
- approved: true
- message: Task routed to backend-developer
- complexity: medium
- estimatedTokens: 2500
- estimatedCost: $0.0125
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Verify implementation meets requirements
- 2. Run tests to ensure nothing broke
- 3. Store decision: buddy-remember
-
-Duration: 2.3s • Tokens: 2,500
-```
-
-**Complexity Levels**:
-
-- **Simple** (< 1000 tokens): Quick tasks, simple queries
-- **Medium** (1000-5000 tokens): Standard features, moderate refactoring
-- **Complex** (> 5000 tokens): Architectural changes, large features
-
-**When to Use**:
-- ✅ Any development task (coding, testing, debugging)
-- ✅ Architectural decisions
-- ✅ Code reviews
-- ✅ Documentation tasks
-- ✅ Planning and analysis
-
-**When NOT to Use**:
-- ❌ Simple questions (use buddy-help instead)
-- ❌ Memory queries (use buddy-remember instead)
-
----
-
-### buddy-remember
-
-**Purpose**: Store and recall knowledge from your project's memory graph
-
-**Syntax**:
-```bash
-# Store knowledge
-buddy-remember ""
-
-# Recall knowledge (search)
-buddy-remember ""
-```
-
-**How it works**:
-
-**Storage Mode**:
-- Detects when you're providing information (not a question)
-- Stores in Knowledge Graph with auto-generated tags
-- Records timestamp and context
-- Returns confirmation
-
-**Recall Mode**:
-- Searches Knowledge Graph by keywords and semantic similarity
-- Ranks results by relevance
-- Returns matching memories with context
-- Suggests next steps if no results found
-
-**Storage Examples**:
-
-```bash
-# Store decisions
-buddy-remember "We decided to use PostgreSQL because it supports JSON and has better performance for complex queries"
-
-# Store patterns
-buddy-remember "All API endpoints follow RESTful conventions with /api/v1 prefix"
-
-# Store lessons learned
-buddy-remember "Login bug was caused by session timeout not being reset on activity. Fixed by updating session middleware"
-
-# Store configuration
-buddy-remember "Production uses AWS RDS with t3.medium instances, staging uses t3.micro"
-```
-
-**Recall Examples**:
-
-```bash
-# Search for decisions
-buddy-remember "why did we choose PostgreSQL?"
-# → Returns: Database selection decisions and reasons
-
-# Find patterns
-buddy-remember "API endpoint conventions"
-# → Returns: RESTful patterns, versioning strategy
-
-# Find solutions to similar problems
-buddy-remember "session timeout issues"
-# → Returns: Past bugs, solutions, and lessons learned
-
-# Check configuration
-buddy-remember "database configuration"
-# → Returns: Database settings, connection strings (sanitized)
-```
-
-**Response Structure (Storage)**:
-
-```
-✓ Memory Stored Successfully
-
-📋 Task
-Store project decision about database choice
-
-────────────────────────────────────
-
-✓ Results
- status: stored
- knowledge_id: "kb_1234567890"
- tags: ["decision", "database", "postgresql"]
- timestamp: "2026-01-20T10:30:00Z"
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Memory is now searchable
- 2. Try: buddy-remember "postgresql" to verify
-
-Duration: 0.8s • Tokens: 300
-```
-
-**Response Structure (Recall - With Results)**:
-
-```
-✓ Memory Search Complete
-
-📋 Query
-postgresql
-
-────────────────────────────────────
-
-✓ Results
- count: 3
- memories:
- 1. [2026-01-15] Decision: PostgreSQL for production
- "We decided to use PostgreSQL because..."
- Tags: decision, database, postgresql
-
- 2. [2026-01-18] Configuration: Database connection
- "Production: aws-rds-pg.xyz, port 5432..."
- Tags: configuration, database, postgresql
-
- 3. [2026-01-19] Lesson: Connection pooling
- "Fixed timeout issues by increasing pool size..."
- Tags: lesson, database, performance
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Review memories above for relevant context
- 2. Apply these learnings to your current task
-
-Duration: 1.2s • Tokens: 800
-```
-
-**Response Structure (Recall - No Results)**:
-
-```
-✓ Memory Search Complete
-
-📋 Query
-microservices
-
-────────────────────────────────────
-
-✓ Results
- count: 0
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Try a broader search term
- 2. Create new memory: buddy-do
-
-Duration: 0.5s • Tokens: 200
-```
-
-**Best Practices**:
-
-**Storage**:
-- ✅ Be specific and concise
-- ✅ Include context (why, when, what)
-- ✅ Store as you work, not later
-- ✅ Use natural language (the system handles tagging)
-
-**Recall**:
-- ✅ Use keywords from your question
-- ✅ Try broader terms if no results
-- ✅ Search before starting new work
-- ✅ Combine with buddy-do for context-aware tasks
-
-**Auto-Tagging**:
-
-The system automatically generates tags based on content:
-- **Entities**: Users, products, services, technologies
-- **Actions**: Created, updated, fixed, decided
-- **Concepts**: Authentication, database, API, testing
-- **Types**: Decision, lesson, pattern, configuration
-
----
-
-### buddy-help
-
-**Purpose**: Quick help and command reference
-
-**Syntax**:
-```bash
-# Basic help
-buddy-help
-
-# Detailed help for all commands
-buddy-help --all
-```
-
-**Output**:
-
-```
-🤖 MeMesh Quick Start
-
-Essential Commands
-
-┌────────────────────────────────────────────┐
-│ buddy-do "" │
-└────────────────────────────────────────────┘
-❯ buddy-do "add user authentication"
-→ Routes to backend-developer, creates auth system
-
-┌────────────────────────────────────────────┐
-│ buddy-remember "" │
-└────────────────────────────────────────────┘
-❯ buddy-remember "Using JWT for sessions"
-→ Stores in Knowledge Graph with auto-tags
-
-┌────────────────────────────────────────────┐
-│ buddy-remember "" │
-└────────────────────────────────────────────┘
-❯ buddy-remember "why JWT?"
-→ Searches and recalls past decisions
-
-💡 New to MeMesh?
-Run: memesh tutorial
-
-📖 Full reference: buddy-help --all
-```
-
-**When to Use**:
-- ✅ First-time setup (verify MeMesh is working)
-- ✅ Quick command reference
-- ✅ Syntax reminders
-
----
-
-## Real-World Examples
-
-Learn how MeMesh helps you build better software through real project scenarios. These examples demonstrate how MeMesh's persistent memory enhances your workflow across multiple sessions.
-
-### Example 1: Building a REST API (Multi-Day Workflow)
-
-This example shows how MeMesh remembers your architectural decisions and coding patterns across a week-long project.
-
-#### Day 1: Initial Setup
-
-**Your Task**: Start building a REST API for an e-commerce platform
-
-```bash
-You: buddy-do "create Express API with user authentication"
-```
-
-**What MeMesh Does**:
-- Analyzes requirements (Express, authentication, API)
-- Routes to backend-developer capability
-- Generates complete setup:
- - Express server with middleware
- - JWT authentication system
- - User model and database schema
- - Auth routes (/login, /register, /refresh)
-
-**MeMesh Automatically Remembers**:
-```
-✅ Tech stack: Express + MongoDB + JWT
-✅ Coding style: RESTful, async/await, error handling middleware
-✅ File structure: controllers/, models/, routes/, middleware/
-✅ Authentication: JWT with 15-min access tokens, 7-day refresh tokens
-```
-
-**Store Your Decision**:
-```bash
-You: buddy-remember "Using JWT for authentication because we need stateless auth for mobile app and easy horizontal scaling"
-```
-
----
-
-#### Day 3: Adding New Features
-
-**Your Task**: Add password reset functionality
-
-```bash
-You: buddy-do "add password reset feature"
-```
-
-**MeMesh Recalls**:
-```
-✅ Your auth architecture from Day 1 (JWT tokens, user model)
-✅ Email service setup (from Day 2)
-✅ Token generation pattern (consistent with existing auth)
-```
-
-**Result**:
-- Password reset integrates seamlessly with existing auth system
-- Uses same JWT library and token patterns
-- Follows your established API conventions (/api/v1/auth/reset-password)
-- No need to re-explain architecture
-
-**Store the New Pattern**:
-```bash
-You: buddy-remember "Password reset uses time-limited JWT tokens (15 min expiry) sent via email. Tokens are single-use and invalidated after use."
-```
-
----
-
-#### Day 7: Query Past Decisions
-
-**Your Task**: New team member asks about auth implementation
-
-```bash
-You: buddy-remember "why JWT over sessions?"
-```
-
-**MeMesh Returns**:
-```
-Found 3 relevant memories:
-
-1. [Day 1, 2026-01-15] Decision: JWT authentication
- "Using JWT for authentication because we need stateless
- auth for mobile app and easy horizontal scaling"
- Tags: authentication, jwt, architecture, decision
-
-2. [Day 3, 2026-01-17] Pattern: Token expiration
- "JWT tokens: 15-min access, 7-day refresh. Keeps security
- tight while maintaining good UX"
- Tags: jwt, security, tokens
-
-3. [Day 4, 2026-01-18] Lesson: Token refresh
- "Implemented silent refresh. Frontend checks token expiry
- and refreshes 5 minutes before expiration"
- Tags: jwt, frontend, refresh-token
-```
-
-**Benefit**: Complete context retrieved in seconds, no digging through Git history or Slack messages.
-
----
-
-#### Day 10: Bug Fix with Context
-
-**Your Task**: Login fails on mobile app
-
-```bash
-You: buddy-remember "mobile authentication"
-You: buddy-do "investigate mobile login failures"
-```
-
-**MeMesh Uses**:
-- Remembers mobile-specific requirements (from Day 1)
-- Recalls auth flow implementation (from Day 1-3)
-- Checks for related past bugs (searches memory graph)
-
-**Finds the Issue**: Cookie settings incompatible with mobile WebView
-
-**Store the Solution**:
-```bash
-You: buddy-remember "Mobile login bug: Cookie sameSite attribute must be 'none' for mobile WebView. Fixed by updating Express session config."
-```
-
----
-
-### Example 2: Frontend Development with Design System
-
-**Day 1: Setting Up the Design System**
-
-```bash
-You: buddy-do "create design system with Tailwind and component library"
-```
-
-**MeMesh Generates**:
-- Tailwind configuration with custom colors
-- Base component library (Button, Input, Card)
-- Typography scale
-- Spacing system
-
-**Store Design Decisions**:
-```bash
-You: buddy-remember "Design system uses 8px spacing scale. Primary color: #3B82F6 (blue-500). Components follow atomic design: atoms → molecules → organisms"
-```
-
----
-
-**Day 4: Building Feature Components**
-
-```bash
-You: buddy-do "create product card component with image, title, price, and add-to-cart button"
-```
-
-**MeMesh Recalls**:
-```
-✅ Design system spacing scale (8px grid)
-✅ Primary color (#3B82F6)
-✅ Existing Button component (reusable)
-✅ Component structure pattern (atomic design)
-```
-
-**Result**:
-- Product card follows established patterns
-- Uses existing Button component
-- Matches spacing and color scheme
-- No style inconsistencies
-
----
-
-**Day 8: Responsive Design**
-
-```bash
-You: buddy-remember "responsive breakpoints"
-```
-
-**MeMesh Returns**:
-```
-Found 2 relevant memories:
-
-1. [Day 2] Configuration: Tailwind breakpoints
- "Using default Tailwind breakpoints: sm (640px),
- md (768px), lg (1024px), xl (1280px)"
- Tags: tailwind, responsive, config
-
-2. [Day 5] Pattern: Mobile-first approach
- "All components built mobile-first, then enhanced
- for larger screens using md: and lg: prefixes"
- Tags: responsive, mobile-first, pattern
-```
-
-**Apply Consistently**: Now you know exactly how to make the new component responsive.
-
----
-
-### Example 3: DevOps and Infrastructure
-
-**Day 1: Setting Up CI/CD**
-
-```bash
-You: buddy-do "setup GitHub Actions CI/CD pipeline"
-```
-
-**Store Infrastructure Decisions**:
-```bash
-You: buddy-remember "Using GitHub Actions for CI/CD. Workflow: lint → test → build → deploy to Vercel. Runs on pull requests and main branch merges."
-```
-
----
-
-**Day 5: Adding Environment Variables**
-
-```bash
-You: buddy-remember "deployment configuration"
-You: buddy-do "add database URL environment variable to deployment"
-```
-
-**MeMesh Recalls**:
-- CI/CD workflow structure
-- Vercel deployment setup
-- Existing environment variable pattern
-
-**Adds Variable Correctly**: Uses GitHub Secrets, updates Vercel config, follows naming convention.
-
----
-
-**Week 2: Troubleshooting Deployment**
-
-```bash
-You: buddy-remember "CI/CD issues"
-```
-
-**MeMesh Returns**:
-```
-Found 1 relevant memory:
-
-[Day 8] Lesson: Build cache issue
-"GitHub Actions build was failing due to stale cache.
-Fixed by adding cache-dependency-path: package-lock.json
-to setup-node action"
-Tags: github-actions, ci-cd, cache, troubleshooting
-```
-
-**Benefit**: Quickly recall solutions to past problems, avoid repeating debugging sessions.
-
----
-
-### Key Takeaways
-
-#### 1. **Cross-Session Context**
-MeMesh maintains context across days and weeks:
-- No need to re-explain architecture
-- Past decisions inform current work
-- Consistent patterns throughout project
-
-#### 2. **Collaborative Memory**
-Perfect for teams:
-- Onboard new members faster
-- Share tribal knowledge
-- Document decisions as you make them
-
-#### 3. **Learning from Experience**
-Build institutional knowledge:
-- Record bugs and solutions
-- Track what works and what doesn't
-- Prevent repeating mistakes
-
-#### 4. **Natural Workflow**
-No disruption to your process:
-- Store knowledge as you work
-- Recall when you need it
-- Automatic memory linking
-
----
-
-### Next Steps
-
-**Try These Workflows**:
-1. **Start a new feature**: `buddy-do "plan [feature]"` → implement → store decisions
-2. **Debug an issue**: `buddy-remember "[error]"` → investigate → store solution
-3. **Review past work**: `buddy-remember "[topic]"` → understand context
-
-**Best Practices**:
-- ✅ Store decisions immediately (don't wait until later)
-- ✅ Be specific in memory descriptions (future you will thank you)
-- ✅ Query memory before starting new work (leverage past knowledge)
-- ✅ Record bugs and solutions (build your knowledge base)
-
-**Learn More**:
-- [BEST_PRACTICES.md](./BEST_PRACTICES.md) - Effective workflows and patterns
-- [Advanced Usage](#advanced-usage) - Complex scenarios and integrations
-- [Memory System](#memory-system) - How the knowledge graph works
-
----
-
-## MCP Tools
-
-### Advanced MCP Tools
-
-These tools provide lower-level access to MeMesh capabilities. For complete API documentation with detailed schemas, examples, and error handling, see **[API_REFERENCE.md](./api/API_REFERENCE.md)**.
-
-#### memesh-create-entities
-
-**Purpose**: Create knowledge entities with explicit relationships
-
-**Usage**: Advanced users who need fine-grained control over knowledge graph structure
-
-**Quick Example**:
-```json
-{
- "entities": [
- {
- "name": "PostgreSQL Database Choice 2026-02-03",
- "entityType": "decision",
- "observations": [
- "Chose PostgreSQL over MySQL",
- "Better JSON support and performance"
- ],
- "tags": ["database", "postgresql", "architecture"]
- }
- ]
-}
-```
-
-**When to Use**:
-- Building complex knowledge graphs
-- Migrating external knowledge
-- Integrating with other systems
-
-📖 **Full Documentation**: [API_REFERENCE.md - memesh-create-entities](./api/API_REFERENCE.md#memesh-create-entities)
-
-#### memesh-generate-tests
-
-**Purpose**: Generate automated test cases using AI
-
-**Aliases**: `generate-tests` (deprecated, will be removed in v3.0.0)
-
-**Parameters**:
-- `specification`: Feature spec (optional)
-- `code`: Source code (optional)
-
-**Note**: Provide either specification or code.
-
-📖 **Full Documentation**: [API_REFERENCE.md - memesh-generate-tests](./api/API_REFERENCE.md#memesh-generate-tests)
-
----
-
-### Learning & Error Tracking
-
-#### memesh-record-mistake
-
-**Purpose**: Record errors and mistakes for learning and prevention
-
-**Parameters**:
-- `action` (required): What action the AI took
-- `errorType` (required): Error classification (`procedure-violation`, `workflow-skip`, `assumption-error`, `validation-skip`, `responsibility-lack`, `firefighting`, `dependency-miss`, `integration-error`, `deployment-error`)
-- `userCorrection` (required): User's correction/feedback
-- `correctMethod` (required): What should have been done instead
-- `impact` (required): Impact of the mistake
-- `preventionMethod` (required): How to prevent in future
-- `relatedRule` (optional): Related rule/guideline
-- `context` (optional): Additional context object
-
-**Quick Example**:
-```json
-{
- "action": "Used synchronous file read in async handler",
- "errorType": "assumption-error",
- "userCorrection": "This blocks the event loop",
- "correctMethod": "Use fs.promises.readFile() instead of fs.readFileSync()",
- "impact": "Server becomes unresponsive under load",
- "preventionMethod": "Always use async I/O in request handlers"
-}
-```
-
-**When to Use**:
-- After fixing bugs
-- Learning from errors
-- Building team knowledge
-- Preventing repeated mistakes
-
-**Benefits**:
-- Automatically stored in knowledge graph
-- Searchable via buddy-remember
-- Helps prevent repeating errors
-- Builds institutional knowledge
-
-📖 **Full Documentation**: [API_REFERENCE.md - memesh-record-mistake](./api/API_REFERENCE.md#memesh-record-mistake)
-
----
-
-### Complete API Reference
-
-For comprehensive documentation including:
-- Detailed input/output schemas
-- JSON examples for all tools
-- Error codes and handling
-- Integration patterns
-- Performance characteristics
-
-See **[API_REFERENCE.md](./api/API_REFERENCE.md)**
-
----
-
-## CLI Commands
-
-### memesh setup
-
-**Purpose**: Interactive configuration wizard
-
-**Features**:
-- Auto-detects Claude Code installation
-- Generates MCP configuration
-- Validates setup
-- Tests connection
-
-**Usage**:
-```bash
-memesh setup
-```
-
-**When to Use**:
-- First-time installation
-- Troubleshooting connection issues
-- Reconfiguring after updates
-
-See [QUICK_START.md](./QUICK_START.md) for detailed setup guide.
-
----
-
-### memesh tutorial
-
-**Purpose**: Interactive 5-minute guided tutorial
-
-**Features**:
-- 7-step walkthrough
-- Hands-on practice with buddy-do and buddy-remember
-- Progress tracking
-- Completion certificate
-
-**Usage**:
-```bash
-memesh tutorial
-```
-
-**Steps**:
-1. Welcome & Overview
-2. Setup Verification
-3. First buddy-do Command
-4. Memory Storage Demo
-5. Memory Recall Demo
-6. Advanced Features Preview
-7. Next Steps & Resources
-
-**When to Use**:
-- Learning MeMesh for the first time
-- Refreshing your knowledge
-- Training team members
-
----
-
-### memesh dashboard
-
-**Purpose**: View session health and metrics
-
-**Features**:
-- Real-time MCP server status
-- Memory usage statistics
-- Recent command history
-- Performance metrics
-- Error log summary
-
-**Usage**:
-```bash
-memesh dashboard
-```
-
----
-
-### memesh stats
-
-**Purpose**: View usage statistics
-
-**Features**:
-- Command frequency analysis
-- Token usage trends
-- Cost tracking
-- Capability usage breakdown
-- Memory growth over time
-
-**Usage**:
-```bash
-memesh stats
-memesh stats --day # Last 24 hours
-memesh stats --week # Last 7 days
-memesh stats --month # Last 30 days
-memesh stats --json # Export as JSON
-memesh stats --csv # Export as CSV
-memesh stats --verbose # Detailed statistics
-```
-
----
-
-### memesh config
-
-**Purpose**: Manage MeMesh configuration
-
-**Subcommands**:
-
-```bash
-# Show current configuration
-memesh config show
-
-# Validate MCP setup
-memesh config validate
-
-# Edit configuration in default editor
-memesh config edit
-
-# Reset configuration to defaults
-memesh config reset
-```
-
-**When to Use**:
-- Verifying setup after installation
-- Troubleshooting connection issues
-- Checking configuration paths
-
----
-
-### memesh report-issue
-
-**Purpose**: Report bugs or issues
-
-**Usage**:
-```bash
-memesh report-issue
-```
-
-**What it does**:
-- Provides GitHub issues link
-- Collects system information (future)
-- Suggests troubleshooting steps
-
----
-
-## Memory System
-
-### Knowledge Graph Architecture
-
-MeMesh uses a graph-based knowledge storage system:
-
-```
- ┌─────────────┐
- │ Entities │ (Users, APIs, Technologies, etc.)
- └──────┬──────┘
- │
- │ has properties
- │
- ┌──────▼──────┐
- │ Properties │ (name, type, metadata)
- └──────┬──────┘
- │
- │ connected by
- │
- ┌──────▼──────┐
- │ Relations │ (USES, DEPENDS_ON, CREATED_BY)
- └─────────────┘
-```
-
-### Entity Types
-
-**Automatic Classification**:
-- `Decision`: Architecture choices, technology selections
-- `Pattern`: Code patterns, conventions, standards
-- `Lesson`: Bug fixes, learnings, best practices
-- `Configuration`: Settings, environment variables
-- `Technology`: Tools, frameworks, libraries
-- `Feature`: Application features, capabilities
-- `Bug`: Issues, problems, error cases
-- `Person`: Team members, stakeholders
-- `Project`: Projects, repositories, systems
-
-### Relationship Types
-
-- `USES`: Entity A uses Entity B
-- `DEPENDS_ON`: Entity A depends on Entity B
-- `CREATED_BY`: Entity A created by Entity B
-- `RELATES_TO`: General relationship
-- `PART_OF`: Entity A is part of Entity B
-
-### Auto-Tracking (Phase 0.6)
-
-**Task Start Tracking**:
-When you use `buddy-do`, MeMesh automatically records:
-- Task description
-- Goal (extracted)
-- Reason (if provided)
-- Expected outcome (if mentioned)
-- Start timestamp
-
-**Memory Linking**:
-Memories created during a task are automatically linked to that task.
-
-**Example**:
-```bash
-buddy-do "implement login feature because users need authentication"
-
-# Auto-tracked:
-{
- task: "implement login feature",
- goal: "implement login feature",
- reason: "users need authentication",
- timestamp: "2026-01-20T10:00:00Z"
-}
-
-# Later memories automatically linked:
-buddy-remember "Using bcrypt for password hashing"
-# → Links to login feature task
-```
-
----
-
-## Task Execution
-
-### How Task Execution Works
-
-**1. Task Analysis**:
-```
-Input: "setup user authentication with JWT"
-
-Analysis:
-- Complexity: Medium (~2500 tokens)
-- Domain: Backend development
-- Required capabilities: [authentication, backend, database]
-- Keywords: [authentication, JWT, user, setup]
-```
-
-**2. Capability Matching**:
-```
-Available capabilities:
-- backend-developer: 90% match (authentication, database)
-- frontend-developer: 20% match (user interface)
-- devops: 30% match (deployment considerations)
-
-Selected: backend-developer (highest match)
-```
-
-**3. Prompt Enhancement**:
-```
-Enhanced Prompt:
-[System Context]
-Project: e-commerce-platform
-Tech Stack: Node.js, Express, PostgreSQL
-Recent Work: User registration endpoint completed
-
-[Task]
-Setup user authentication with JWT
-
-[Context]
-- Existing user model in database
-- JWT library already installed (jsonwebtoken)
-- Environment variables configured for secrets
-```
-
-### Capability Catalog
-
-**backend-developer**:
-- API development
-- Database design
-- Server-side logic
-- Authentication/authorization
-- Data validation
-
-**frontend-developer**:
-- UI components
-- State management
-- Styling and layout
-- Client-side logic
-- Responsive design
-
-**devops**:
-- CI/CD pipelines
-- Deployment automation
-- Infrastructure configuration
-- Monitoring and logging
-- Container orchestration
-
-**database-admin**:
-- Schema design
-- Query optimization
-- Migrations
-- Backups and recovery
-- Performance tuning
-
-**security-expert**:
-- Vulnerability assessment
-- Authentication systems
-- Data encryption
-- Security audits
-- Compliance
-
-**general-agent**:
-- Default fallback
-- General questions
-- Documentation
-- Planning and analysis
-
----
-
-## Configuration
-
-### MCP Configuration
-
-MeMesh is a Claude Code Plugin. The MCP server is auto-managed via the plugin's `.mcp.json` file — no manual configuration of `~/.claude/mcp_settings.json` is needed.
-
-Simply install MeMesh and restart Claude Code. If auto-configuration fails, run `memesh setup`.
-
-**For Claude Desktop** (not Claude Code CLI), see the Claude Desktop documentation for MCP configuration locations:
-- **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
-- **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
-- **Linux**: `~/.config/Claude/claude_desktop_config.json`
-
-**Environment Variables**:
-
-- `DEBUG`: Enable debug logging (true/false)
-- `MEMESH_DATA_DIR`: Custom data directory (default: ~/.memesh)
-- `MEMESH_LOG_LEVEL`: Log level (error/warn/info/debug)
-
----
-
-## Visual Explorer (Streamlit UI)
-
-MeMesh includes an interactive web UI for exploring your knowledge graph visually.
-
-### Setup
-
-```bash
-cd streamlit
-pip install -r requirements.txt
-streamlit run app.py
-```
-
-### Dashboard
-
-The dashboard provides an overview of your knowledge base:
-
-- **Statistics cards**: Entity, relation, observation, and tag counts
-- **Entity Type Distribution**: Pie chart showing the breakdown of entity types
-- **Top Tags**: Bar chart of most frequently used tags over time
-- **Entity Growth**: Cumulative growth chart (day/week/month granularity)
-- **Recent Entities**: Table of latest entities with tag and observation counts
-
-### KG Explorer
-
-The Knowledge Graph Explorer provides an interactive graph visualization:
-
-- **Interactive graph**: Drag, zoom, and hover over nodes and edges
-- **Color-coded nodes**: Each entity type has a distinct color
-- **Relation edges**: Color-coded by relation type (similar_to, solves, caused_by, etc.)
-- **Filters**: Search by text (FTS5), entity type, tags, and date range
-- **Adjustable density**: Slider to control the number of displayed nodes (10–300)
-
-### Backfilling Relations
-
-If you have existing entities without relations, run the backfill script:
-
-```bash
-# Preview what will be created
-python streamlit/backfill_relations.py --dry-run
-
-# Execute (creates relations in the database)
-python streamlit/backfill_relations.py
-```
-
-The script uses a 3-layer strategy: topic/project clustering, cross-type semantic relations, and tag-based similarity.
-
----
-
-## Advanced Usage
-
-### Workflow Examples
-
-#### Workflow 1: Starting a New Feature
-
-```bash
-# 1. Recall relevant context
-buddy-remember "similar features"
-buddy-remember "architectural patterns"
-
-# 2. Plan implementation
-buddy-do "plan user profile feature with avatar upload"
-
-# 3. Execute implementation
-buddy-do "implement user profile API endpoints"
-buddy-do "create profile UI component"
-
-# 4. Store decisions
-buddy-remember "User profile feature uses S3 for avatar storage because it scales better"
-
-# 5. Document patterns
-buddy-remember "Profile endpoints follow /api/v1/users/:id/profile pattern"
-```
-
-#### Workflow 2: Debugging a Bug
-
-```bash
-# 1. Search for similar issues
-buddy-remember "login errors"
-buddy-remember "session timeout"
-
-# 2. Analyze and fix
-buddy-do "investigate why sessions expire immediately after login"
-
-# 3. Record solution
-buddy-remember "Login session bug was caused by cookie domain mismatch. Fixed by setting domain to null in session config."
-```
-
-#### Workflow 3: Code Review
-
-```bash
-# 1. Recall standards
-buddy-remember "code review checklist"
-buddy-remember "security best practices"
-
-# 2. Review implementation
-buddy-do "review authentication implementation for security issues"
-
-# 3. Store findings
-buddy-remember "Security review found: need rate limiting on login endpoint to prevent brute force"
-```
-
-### Integration with Other Tools
-
-**Git Integration**:
-```bash
-# Store commit messages as memories
-buddy-remember "feat(auth): add JWT authentication"
-
-# Recall to maintain consistency
-buddy-remember "recent authentication changes"
-```
-
-**CI/CD Integration** (Future):
-```bash
-# Store deployment info
-buddy-remember "Deployment v1.2.3 to production on 2026-01-20, includes authentication feature"
-
-# Query deployment history
-buddy-remember "recent deployments"
-```
-
-**Testing Integration**:
-```bash
-# Store test results
-buddy-remember "Test suite passing: 245/245 tests, coverage 87%"
-
-# Track test patterns
-buddy-remember "Authentication tests use mock JWT tokens"
-```
-
----
-
-## Troubleshooting
-
-### Common Issues
-
-For detailed troubleshooting, see [TROUBLESHOOTING.md](./TROUBLESHOOTING.md).
-
-**Quick Fixes**:
-
-1. **buddy-help not working**
- ```bash
- memesh setup
- # Restart Claude Code
- # Try: buddy-help
- ```
-
-2. **Connection errors**
- ```bash
- memesh config validate
- # Check configuration
- # Restart Claude Code
- ```
-
-3. **Slow responses**
- - Simplify task descriptions
- - Check network connection
- - Review token limits
-
-### Debug Mode
-
-Enable debug logging by setting environment variables `DEBUG=true` and `MEMESH_LOG_LEVEL=debug` in the plugin's `.mcp.json` configuration.
-
-Check logs:
-- **macOS**: `~/Library/Logs/Claude/`
-- **Windows**: `%APPDATA%\Claude\Logs\`
-
-### Getting Help
-
-1. **Check Documentation**:
- - [QUICK_START.md](./QUICK_START.md) - Getting started
- - [BEST_PRACTICES.md](./BEST_PRACTICES.md) - Effective workflows
- - [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) - Common issues
-
-2. **Run Commands**:
- ```bash
- memesh tutorial # Interactive learning
- memesh config validate # Check setup
- memesh report-issue # Get support
- ```
-
-3. **Community Support**:
- - GitHub Issues: https://github.com/PCIRCLE-AI/claude-code-buddy/issues
- - Discussions: https://github.com/PCIRCLE-AI/claude-code-buddy/discussions
-
----
-
-## Appendix
-
-### Command Quick Reference
-
-```
-┌─────────────────────────────────────────────────────┐
-│ MeMesh Command Reference │
-├─────────────────────────────────────────────────────┤
-│ MCP Tools (In Claude Code) │
-│ buddy-do "" Execute with memory context│
-│ buddy-remember "" Store/recall memory │
-│ buddy-help Quick help guide │
-│ │
-│ CLI Commands (In Terminal) │
-│ memesh setup Interactive setup wizard │
-│ memesh tutorial 5-minute guided tour │
-│ memesh dashboard Session health dashboard │
-│ memesh stats Usage statistics │
-│ memesh config Manage configuration │
-│ memesh report-issue Bug reporting │
-└─────────────────────────────────────────────────────┘
-```
-
-### Glossary
-
-- **MCP**: Model Context Protocol - Standard for AI tool integration
-- **Knowledge Graph**: Graph database storing entities and relationships
-- **Capability**: Specialized skill set for task execution
-- **Entity**: Node in knowledge graph (user, technology, decision, etc.)
-- **Relation**: Edge connecting entities (USES, DEPENDS_ON, etc.)
-- **Task Metadata**: Extracted information from task description
-- **Prompt Enhancement**: Adding project context to task prompts
-
-### Version History
-
-- **v2.8.0**: Tool naming unification (memesh-* prefix), A2A removal (local-first architecture), Vector semantic search with ONNX embeddings
-- **v2.7.0**: Memory retention periods updated (30/90 days), Auto-memory hooks improvements, Documentation updates
-- **v2.6.6**: ErrorClassifier integration, Enhanced error handling
-- **v2.6.5**: Interactive tutorial, Improved QUICK_START
-- **v2.6.4**: Response formatting improvements, Visual hierarchy
-- **v2.6.3**: Interactive setup wizard
-- **v2.6.2**: Phase 0.6 - Auto-tracking and memory linking
-- **v2.6.1**: Performance optimizations
-- **v2.6.0**: Smart routing with capability matching
-
----
-
-**Next Steps**:
-
-1. **Learn More**: Read [BEST_PRACTICES.md](./BEST_PRACTICES.md) for effective workflows
-2. **Try It Out**: Run `memesh tutorial` for hands-on practice
-3. **Get Support**: Visit our [GitHub Discussions](https://github.com/PCIRCLE-AI/claude-code-buddy/discussions)
-4. **Contribute**: Check [CONTRIBUTING.md](../CONTRIBUTING.md) to get involved
-
----
-
-**MeMesh** — Persistent memory for Claude Code
diff --git a/docs/api/API_REFERENCE.md b/docs/api/API_REFERENCE.md
index a76b2190..eb4e4264 100644
--- a/docs/api/API_REFERENCE.md
+++ b/docs/api/API_REFERENCE.md
@@ -1,1951 +1,244 @@
-# MeMesh Plugin - API Reference
+# MeMesh Plugin -- API Reference
-**Protocol**: Model Context Protocol (MCP)
+**Protocol**: Model Context Protocol (MCP) over stdio
+**Version**: 3.0.0
---
-## Table of Contents
-
-1. [Introduction](#introduction)
-2. [Authentication & Connection](#authentication--connection)
-3. [Tool Catalog](#tool-catalog)
-4. [Core Tools](#core-tools)
- - [buddy-do](#buddy-do)
- - [buddy-remember](#buddy-remember)
- - [buddy-help](#buddy-help)
-5. [Knowledge Graph Tools](#knowledge-graph-tools)
- - [memesh-create-entities](#memesh-create-entities)
-6. [Learning & Automation Tools](#learning--automation-tools)
- - [memesh-record-mistake](#memesh-record-mistake)
- - [memesh-hook-tool-use](#memesh-hook-tool-use)
-7. [System Tools](#system-tools)
- - [memesh-generate-tests](#memesh-generate-tests)
- - [memesh-metrics](#memesh-metrics)
-8. [Data Models](#data-models)
-9. [Error Reference](#error-reference)
-10. [Integration Examples](#integration-examples)
-11. [Rate Limits & Performance](#rate-limits--performance)
+## Tools
----
-
-## Introduction
-
-MeMesh Plugin is an MCP (Model Context Protocol) server that provides persistent memory management, context-aware task execution, and knowledge graph capabilities for Claude Code. This API reference documents all available MCP tools, their parameters, responses, and usage patterns.
-
-**Key Features**:
-- Context-aware task execution with memory integration
-- Persistent knowledge graph storage
-- Automatic memory linking and tagging
-- Session health monitoring
-- Test generation capabilities
-
-**Architecture**:
-```
-Claude Code CLI
- ↓
-MCP Protocol
- ↓
-MeMesh Plugin
- ↓
- ┌──────┴──────┐
- ↓ ↓
-Router Knowledge Graph
-```
-
----
-
-## Authentication & Connection
-
-MeMesh Plugin is a Claude Code Plugin. The MCP server is auto-managed via the plugin's `.mcp.json` file — no manual configuration of `~/.claude/mcp_settings.json` is needed.
-
-Simply install MeMesh Plugin and restart Claude Code. The plugin system handles MCP server registration automatically.
-
-**If auto-configuration fails**, run `memesh setup` to reconfigure.
-
-**Advanced Configuration**:
-```json
-{
- "mcpServers": {
- "memesh": {
- "command": "npx",
- "args": ["-y", "@pcircle/memesh"],
- "env": {
- "DEBUG": "true",
- "MEMESH_DATA_DIR": "/custom/path/to/data",
- "MEMESH_LOG_LEVEL": "debug"
- }
- }
- }
-}
-```
-
-**Environment Variables**:
-
-| Variable | Type | Default | Description |
-|----------|------|---------|-------------|
-| `DEBUG` | boolean | `false` | Enable debug logging |
-| `MEMESH_DATA_DIR` | string | `~/.memesh` | Data directory for knowledge graph |
-| `MEMESH_LOG_LEVEL` | string | `info` | Log level: error, warn, info, debug |
-
----
-
-## Tool Catalog
-
-MeMesh provides 8 MCP tools organized into three categories:
-
-### Core Tools (User-Facing)
-
-| Tool | Purpose | Complexity |
-|------|---------|-----------|
-| `buddy-do` | Execute tasks with memory context | Simple |
-| `buddy-remember` | Store and recall project memory | Simple |
-| `buddy-help` | Get help and command reference | Simple |
-
-### Knowledge Graph Tools (Advanced)
-
-| Tool | Purpose | Complexity |
-|------|---------|-----------|
-| `memesh-create-entities` | Create knowledge entities with auto-relations | Advanced |
-
-### Learning & Automation Tools
-
-| Tool | Purpose | Complexity |
-|------|---------|-----------|
-| `memesh-record-mistake` | Record mistakes for continuous learning | Medium |
-| `memesh-hook-tool-use` | Process tool execution events (auto-triggered) | Internal |
-
-### System Tools
-
-| Tool | Purpose | Complexity |
-|------|---------|-----------|
-| `memesh-generate-tests` | Generate test cases from specs or code | Medium |
-| `memesh-metrics` | View session metrics and memory status | Simple |
-
----
-
-## Core Tools
-
-### buddy-do
-
-**Purpose**: Execute tasks with intelligent routing to specialized capabilities.
-
-**Use Cases**:
-- Development tasks (backend, frontend, DevOps)
-- Architectural decisions
-- Code reviews and debugging
-- Documentation tasks
-- Complex planning and analysis
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "task": {
- "type": "string",
- "description": "Task description to analyze and enrich (e.g., 'setup authentication', 'fix login bug')"
- }
- },
- "required": ["task"]
-}
-```
-
-#### Parameters
-
-| Parameter | Type | Required | Description | Example |
-|-----------|------|----------|-------------|---------|
-| `task` | string | Yes | Task description in natural language | "implement user authentication with JWT" |
-
-#### Response Format
-
-```typescript
-{
- content: [
- {
- type: "text",
- text: string // Formatted response with routing decision
- }
- ]
-}
-```
-
-**Response Structure** (parsed from formatted text):
-- Task description
-- Routing decision (approved/rejected)
-- Capability focus (e.g., backend, frontend)
-- Complexity level (simple, medium, complex)
-- Estimated tokens
-- Estimated cost
-- Enhanced prompt (for Claude to execute)
-- Duration and statistics
-
-#### Examples
-
-**Example 1: Backend Development Task**
-
-Request:
-```json
-{
- "task": "implement user authentication with JWT tokens"
-}
-```
-
-Response:
-```
-✓ BUDDY-DO SUCCESS
-
-📋 Task
-implement user authentication with JWT tokens
-
-────────────────────────────────────
-
-✓ Results
- routing:
- approved: true
- message: Task routed for capabilities: backend, authentication
- capabilityFocus: ["backend", "authentication"]
- complexity: medium
- estimatedTokens: 2500
- estimatedCost: 0.0125
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Review implementation for security best practices
- 2. Add tests for authentication flows
- 3. Store decision: buddy-remember
-
-Duration: 1.2s • Tokens: 2,500
-```
-
-**Example 2: Frontend Component**
-
-Request:
-```json
-{
- "task": "create responsive navigation bar with dark mode toggle"
-}
-```
-
-Response:
-```
-✓ BUDDY-DO SUCCESS
-
-📋 Task
-create responsive navigation bar with dark mode toggle
-
-────────────────────────────────────
-
-✓ Results
- routing:
- approved: true
- message: Task routed for capabilities: frontend, ui-design
- capabilityFocus: ["frontend", "ui-design"]
- complexity: medium
- estimatedTokens: 1800
- estimatedCost: 0.009
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Test across different screen sizes
- 2. Verify accessibility standards
- 3. Store component patterns: buddy-remember
-
-Duration: 0.9s • Tokens: 1,800
-```
-
-**Example 3: Bug Investigation**
-
-Request:
-```json
-{
- "task": "investigate why sessions expire immediately after login"
-}
-```
-
-Response:
-```
-✓ BUDDY-DO SUCCESS
-
-📋 Task
-investigate why sessions expire immediately after login
-
-────────────────────────────────────
-
-✓ Results
- routing:
- approved: true
- message: Task routed for capabilities: debugging, backend
- capabilityFocus: ["debugging", "backend"]
- complexity: medium
- estimatedTokens: 2200
- estimatedCost: 0.011
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Check session configuration
- 2. Review cookie settings
- 3. Document fix: buddy-remember
-
-Duration: 1.5s • Tokens: 2,200
-```
-
-#### Task Metadata Extraction
-
-buddy-do automatically extracts metadata from task descriptions:
-
-| Field | Pattern | Example |
-|-------|---------|---------|
-| `goal` | First sentence or "to X" | "implement login feature" |
-| `reason` | "because X", "so that X" | "users need authentication" |
-| `expectedOutcome` | "should X", "will X" | "users can log in securely" |
-
-**Example**:
-```json
-{
- "task": "add email verification because users need to confirm their accounts"
-}
-```
-
-Extracted metadata:
-```typescript
-{
- goal: "add email verification",
- reason: "users need to confirm their accounts",
- expectedOutcome: undefined // Not explicitly stated
-}
-```
-
-#### Complexity Levels
-
-| Level | Token Range | Characteristics | Examples |
-|-------|-------------|-----------------|----------|
-| Simple | < 1,000 | Quick tasks, simple queries | "format this JSON", "explain this function" |
-| Medium | 1,000-5,000 | Standard features, refactoring | "add user profile page", "refactor auth service" |
-| Complex | > 5,000 | Architecture, large features | "redesign database schema", "build payment system" |
-
-#### Best Practices
-
-✅ **Do**:
-- Use clear, specific task descriptions
-- Include context when relevant (e.g., "because we need...")
-- Mention expected outcomes
-- Use for all development tasks
-
-❌ **Don't**:
-- Use for simple questions (use buddy-help instead)
-- Use for memory searches (use buddy-remember instead)
-- Write vague descriptions ("fix stuff")
-- Include sensitive credentials in task text
-
-#### Error Responses
-
-```typescript
-// Invalid input
-{
- error: "Validation failed: task is required",
- code: "VALIDATION_FAILED"
-}
-
-// Routing failure
-{
- error: "Unable to route task: no matching capabilities",
- code: "ROUTING_FAILED"
-}
-
-// System error
-{
- error: "Router unavailable",
- code: "OPERATION_FAILED"
-}
-```
-
----
-
-### buddy-remember
-
-**Purpose**: Store and recall knowledge from your project's memory graph.
-
-**Use Cases**:
-- Store architectural decisions
-- Record bug fixes and solutions
-- Save API design patterns
-- Document project conventions
-- Search for past decisions
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "query": {
- "type": "string",
- "description": "Search query (natural language supported for semantic search)"
- },
- "mode": {
- "type": "string",
- "enum": ["semantic", "keyword", "hybrid"],
- "description": "Search mode: semantic (AI similarity), keyword (exact match), hybrid (both combined). Default: hybrid"
- },
- "limit": {
- "type": "number",
- "description": "Maximum number of results to return (1-50, default: 10)",
- "minimum": 1,
- "maximum": 50
- },
- "matchThreshold": {
- "type": "number",
- "description": "Minimum match score (0-1). Higher values return fewer but more relevant results. Default: 0.3",
- "minimum": 0,
- "maximum": 1
- },
- "allProjects": {
- "type": "boolean",
- "description": "Search across all projects (default: false, searches only current project + global memories)"
- }
- },
- "required": ["query"]
-}
-```
-
-#### Parameters
-
-| Parameter | Type | Required | Default | Description | Example |
-|-----------|------|----------|---------|-------------|---------|
-| `query` | string | Yes | - | Search query (natural language supported) | "why did we choose PostgreSQL?" |
-| `mode` | string | No | hybrid | Search mode: `semantic`, `keyword`, `hybrid` | "keyword" |
-| `limit` | number | No | 10 | Max number of results (1-50) | 20 |
-| `matchThreshold` | number | No | 0.3 | Minimum match score (0-1) | 0.5 |
-| `allProjects` | boolean | No | false | Search across all projects | true |
-
-#### Response Format
-
-```typescript
-{
- content: [
- {
- type: "text",
- text: string // Formatted response with memories
- }
- ]
-}
-```
-
-**Response Structure** (parsed from formatted text):
-- Query text
-- Memory count
-- Memory list (with timestamps, tags, content)
-- Suggestions (if no results)
-
-#### Examples
-
-**Example 1: Store Decision**
-
-Request:
-```json
-{
- "query": "We chose PostgreSQL because it supports JSON columns and has better performance for complex queries than MySQL"
-}
-```
-
-Response:
-```
-✓ Memory Stored Successfully
-
-📋 Task
-Store project decision
-
-────────────────────────────────────
-
-✓ Results
- status: stored
- knowledge_id: "kb_1738560000123"
- tags: ["decision", "database", "postgresql"]
- timestamp: "2026-02-03T10:30:00Z"
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Memory is now searchable
- 2. Try: buddy-remember "postgresql" to verify
-
-Duration: 0.8s • Tokens: 300
-```
-
-**Example 2: Recall Memories (With Results)**
-
-Request:
-```json
-{
- "query": "database decisions",
- "limit": 3
-}
-```
-
-Response:
-```
-✓ Memory Search Complete
-
-📋 Query
-database decisions
-
-────────────────────────────────────
-
-✓ Results
- count: 3
- memories:
- 1. [2026-02-01] Decision: PostgreSQL for production
- "We chose PostgreSQL because it supports JSON..."
- Tags: decision, database, postgresql
-
- 2. [2026-02-02] Configuration: Database pooling
- "Using pg-pool with max 20 connections..."
- Tags: configuration, database, performance
-
- 3. [2026-02-03] Lesson: Connection timeout fix
- "Fixed timeouts by increasing pool timeout to 30s..."
- Tags: lesson, database, bug_fix
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Review memories above for relevant context
- 2. Apply these learnings to your current task
-
-Duration: 1.1s • Tokens: 800
-```
-
-**Example 3: No Results Found**
-
-Request:
-```json
-{
- "query": "blockchain integration"
-}
-```
-
-Response:
-```
-✓ Memory Search Complete
-
-📋 Query
-blockchain integration
-
-────────────────────────────────────
-
-✓ Results
- count: 0
-
-────────────────────────────────────
-
-💡 Next Steps
- 1. Try a broader search term
- 2. Create new memory: buddy-remember
- 3. Use different keywords
-
-Duration: 0.5s • Tokens: 200
-```
-
-#### Storage vs. Recall Detection
-
-MeMesh automatically detects whether you're storing or recalling:
-
-**Storage Indicators**:
-- Declarative statements
-- Past tense verbs ("decided", "chose", "implemented")
-- Contains factual information
-- Longer descriptions (> 50 chars)
-
-**Recall Indicators**:
-- Question words ("why", "how", "what", "when")
-- Question marks
-- Short queries
-- Search keywords
-
-#### Auto-Tagging
-
-MeMesh automatically generates tags based on content:
-
-**Entity Types**:
-- Technologies: postgresql, jwt, react, node.js
-- Concepts: authentication, database, api, testing
-- Actions: created, fixed, decided, refactored
-
-**Tag Categories**:
-- `decision` - Architectural choices
-- `lesson` - Learnings from bugs/issues
-- `pattern` - Code patterns and conventions
-- `configuration` - Settings and configs
-
-#### Best Practices
-
-✅ **Storage Best Practices**:
-- Be specific and concise
-- Include context (why, when, what)
-- Store as you work (not later)
-- Use natural language
-- Include reasons for decisions
-
-✅ **Recall Best Practices**:
-- Use keywords from your question
-- Try broader terms if no results
-- Search before starting new work
-- Combine with buddy-do for context
-
-❌ **Avoid**:
-- Storing sensitive credentials
-- Vague descriptions
-- Duplicate storage
-- Overly long queries
-
-#### Error Responses
-
-```typescript
-// Invalid input
-{
- error: "Validation failed: query is required",
- code: "VALIDATION_FAILED"
-}
-
-// Storage error
-{
- error: "Failed to store memory: database unavailable",
- code: "OPERATION_FAILED"
-}
-
-// Search error
-{
- error: "Memory search failed",
- code: "OPERATION_FAILED"
-}
-```
-
----
-
-### buddy-help
-
-**Purpose**: Get help, command reference, and usage examples.
-
-**Use Cases**:
-- First-time setup verification
-- Quick command syntax reference
-- Learn available commands
-- View examples and patterns
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "command": {
- "type": "string",
- "description": "Specific command to get help for (e.g., 'do', 'remember', '--all' for full reference)",
- "optional": true
- }
- }
-}
-```
-
-#### Parameters
-
-| Parameter | Type | Required | Description | Example |
-|-----------|------|----------|-------------|---------|
-| `command` | string | No | Command name or "--all" for full reference | "do", "--all" |
-
-#### Response Format
-
-```typescript
-{
- content: [
- {
- type: "text",
- text: string // Formatted help text
- }
- ]
-}
-```
-
-#### Examples
-
-**Example 1: Basic Help**
-
-Request:
-```json
-{
- "command": undefined
-}
-```
-
-Response:
-```
-🤖 MeMesh Quick Start
-
-Essential Commands
-
-┌────────────────────────────────────────────┐
-│ buddy-do "" │
-└────────────────────────────────────────────┘
-❯ buddy-do "add user authentication"
-→ Routes to backend-developer, creates auth system
-
-┌────────────────────────────────────────────┐
-│ buddy-remember "" │
-└────────────────────────────────────────────┘
-❯ buddy-remember "Using JWT for sessions"
-→ Stores in Knowledge Graph with auto-tags
-
-┌────────────────────────────────────────────┐
-│ buddy-remember "" │
-└────────────────────────────────────────────┘
-❯ buddy-remember "why JWT?"
-→ Searches and recalls past decisions
-
-💡 New to MeMesh?
-Run: memesh tutorial
-
-📖 Full reference: buddy-help --all
-```
-
-**Example 2: Full Reference**
-
-Request:
-```json
-{
- "command": "--all"
-}
-```
-
-Response: (Shows detailed reference for all commands with examples)
-
-**Example 3: Specific Command**
-
-Request:
-```json
-{
- "command": "do"
-}
-```
-
-Response: (Shows detailed help for buddy-do command)
-
-#### Best Practices
-
-✅ **When to Use**:
-- First time using MeMesh
-- Forgot command syntax
-- Need examples
-- Want to see all available commands
-
-❌ **Don't Use For**:
-- Actual task execution (use buddy-do)
-- Memory searches (use buddy-remember)
+MeMesh exposes 3 tools via MCP.
---
-## Knowledge Graph Tools
-
-### memesh-create-entities
-
-**Purpose**: Create knowledge entities with explicit structure and relationships.
-
-**Use Cases**:
-- Build complex knowledge graphs
-- Store structured architectural decisions
-- Record feature implementations
-- Document bug fixes with metadata
-- Migrate external knowledge
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "entities": {
- "type": "array",
- "description": "Array of entities to create",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "Entity name (unique identifier)"
- },
- "entityType": {
- "type": "string",
- "description": "Entity type"
- },
- "observations": {
- "type": "array",
- "items": { "type": "string" },
- "description": "Array of observations (facts, notes)"
- },
- "tags": {
- "type": "array",
- "items": { "type": "string" },
- "description": "Optional tags"
- },
- "metadata": {
- "type": "object",
- "description": "Optional metadata"
- }
- },
- "required": ["name", "entityType", "observations"]
- }
- }
- },
- "required": ["entities"]
-}
-```
-
-#### Parameters
-
-| Field | Type | Required | Description | Example |
-|-------|------|----------|-------------|---------|
-| `entities` | array | Yes | Array of entity objects | See examples |
-| `entities[].name` | string | Yes | Unique entity identifier | "OAuth Integration 2026-02-03" |
-| `entities[].entityType` | string | Yes | Type of entity (see types below) | "decision", "feature" |
-| `entities[].observations` | array | Yes | Facts and notes about entity | ["Uses OAuth 2.0", "Supports Google/GitHub"] |
-| `entities[].tags` | array | No | Additional tags | ["auth", "oauth", "security"] |
-| `entities[].metadata` | object | No | Custom metadata | { "author": "KT", "priority": "high" } |
-
-#### Entity Types
-
-| Type | Description | Use Cases |
-|------|-------------|-----------|
-| `decision` | Architectural/technical decisions | "Chose PostgreSQL over MySQL" |
-| `bug_fix` | Bug fixes and root causes | "Fixed session timeout issue" |
-| `feature` | Feature implementations | "User profile feature" |
-| `lesson_learned` | Lessons from incidents | "Always validate input data" |
-| `best_practice` | Validated best practices | "Use JWT for stateless auth" |
-| `problem_solution` | Problem-solution pairs | "Solved N+1 query problem" |
-| `technical_debt` | Technical debt items | "Refactor user service needed" |
-| `optimization` | Performance optimizations | "Added database indexing" |
-| `refactoring` | Refactoring decisions | "Split monolith into services" |
-| `code_change` | Code change events | "Updated auth middleware" |
-| `test_result` | Test execution results | "All tests passing (245/245)" |
-
-#### Response Format
-
-```typescript
-{
- created: string[], // Names of created entities
- count: number, // Number created
- autoRelationsCreated: number, // Auto-inferred relations created (v2.9.3+)
- errors?: Array<{ // Errors if any
- name: string,
- error: string
- }>
-}
-```
-
-#### Auto-Relation Inference (v2.9.3+)
-
-After creating entities, MeMesh automatically infers and creates relations between new entities and existing entities that share topic keywords. This is a best-effort feature — it never blocks entity creation.
-
-**How it works:**
-1. Topic keywords are extracted from entity names (first 2 words, 3+ characters)
-2. New entities are compared against each other and against existing entities
-3. Relations are inferred based on entity type combinations:
-
-| Entity A Type | Entity B Type | Relation Created |
-|--------------|--------------|-----------------|
-| `bug_fix` | `feature` | bug_fix `solves` feature |
-| `decision` | `feature` | feature `enabled_by` decision |
-| `lesson_learned` | `bug_fix` | lesson_learned `caused_by` bug_fix |
-| Same type | Same type | `similar_to` |
+### remember
-**Limits:**
-- Maximum 50 auto-relations per batch (`MAX_AUTO_RELATIONS`)
-- Session-specific types excluded (`session_keypoint`, `session_identity`, `task_start`, `session_summary`)
-- Duplicate relations silently skipped (UNIQUE constraint)
-```
-
-#### Examples
+Store knowledge as an entity with observations, tags, and relations.
-**Example 1: Architecture Decision**
+**Input Schema**:
-Request:
-```json
-{
- "entities": [
- {
- "name": "PostgreSQL Database Choice 2026-02-03",
- "entityType": "decision",
- "observations": [
- "Chose PostgreSQL over MySQL for production database",
- "Primary reasons: JSON column support, better performance for complex queries",
- "Supports full-text search natively",
- "Better concurrency handling with MVCC"
- ],
- "tags": ["database", "postgresql", "architecture"],
- "metadata": {
- "author": "KT",
- "date": "2026-02-03",
- "impact": "high"
- }
- }
- ]
-}
-```
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `name` | string | Yes | Unique entity name (e.g., `"auth-decision"`, `"jwt-pattern"`) |
+| `type` | string | Yes | Entity type (e.g., `"decision"`, `"pattern"`, `"lesson"`, `"commit"`) |
+| `observations` | string[] | No | Key facts or observations about this entity |
+| `tags` | string[] | No | Tags for filtering (e.g., `"project:myapp"`, `"type:decision"`) |
+| `relations` | object[] | No | Relations to other entities |
-Response:
-```json
-{
- "created": ["PostgreSQL Database Choice 2026-02-03"],
- "count": 1
-}
-```
+**Relations object**:
-**Example 2: Bug Fix with Context**
+| Field | Type | Required | Description |
+|-------|------|----------|-------------|
+| `to` | string | Yes | Target entity name (must already exist) |
+| `type` | string | Yes | Relation type (e.g., `"implements"`, `"related-to"`) |
-Request:
-```json
-{
- "entities": [
- {
- "name": "Session Timeout Bug Fix 2026-02-03",
- "entityType": "bug_fix",
- "observations": [
- "Bug: Sessions expired immediately after login",
- "Root cause: Cookie domain was set incorrectly",
- "Solution: Changed cookie domain to null in session config",
- "Testing: Verified sessions persist for 24 hours"
- ],
- "tags": ["bug", "session", "authentication", "fixed"],
- "metadata": {
- "severity": "critical",
- "affected_users": 0,
- "fix_duration": "2 hours"
- }
- }
- ]
-}
-```
+**Response**:
-Response:
```json
{
- "created": ["Session Timeout Bug Fix 2026-02-03"],
- "count": 1
+ "stored": true,
+ "entityId": 1,
+ "name": "auth-decision",
+ "type": "decision",
+ "observations": 2,
+ "tags": 1,
+ "relations": 0
}
```
-**Example 3: Multiple Entities**
+If a relation target does not exist, the entity is still stored and `relationErrors` is included in the response.
-Request:
-```json
-{
- "entities": [
- {
- "name": "User Authentication Feature",
- "entityType": "feature",
- "observations": [
- "Implemented JWT-based authentication",
- "Supports email/password and OAuth",
- "Includes refresh token mechanism"
- ],
- "tags": ["feature", "auth", "jwt"]
- },
- {
- "name": "Authentication API Endpoints",
- "entityType": "code_change",
- "observations": [
- "POST /api/v1/auth/login",
- "POST /api/v1/auth/refresh",
- "POST /api/v1/auth/logout"
- ],
- "tags": ["api", "auth", "endpoints"]
- }
- ]
-}
-```
+**Examples**:
-Response:
```json
+// Store a decision
{
- "created": [
- "User Authentication Feature",
- "Authentication API Endpoints"
+ "name": "auth-decision",
+ "type": "decision",
+ "observations": [
+ "Chose JWT for authentication",
+ "Using RS256 algorithm for token signing"
],
- "count": 2
-}
-```
-
-#### Auto-Tagging Behavior
-
-If no `scope:` tag is provided, MeMesh automatically adds `scope:project`.
-
-**Example**:
-```json
-{
- "tags": ["decision", "database"]
-}
-```
-
-Becomes:
-```json
-{
- "tags": ["decision", "database", "scope:project"]
-}
-```
-
-#### Best Practices
-
-✅ **Do**:
-- Use descriptive entity names with dates
-- Include comprehensive observations
-- Add relevant tags for searchability
-- Use metadata for structured data
-- Group related entities in single call
-
-❌ **Don't**:
-- Create duplicate entities (check first)
-- Use vague entity names
-- Skip observations (required field)
-- Store sensitive data in observations
-
-#### Error Responses
-
-```typescript
-// Invalid entity type
-{
- created: ["Entity 1"],
- count: 1,
- errors: [
- {
- name: "Entity 2",
- error: "Invalid entity type: unknown_type"
- }
- ]
-}
-
-// Validation failure
-{
- error: "Validation failed: observations is required",
- code: "VALIDATION_FAILED"
+ "tags": ["project:myapp", "topic:auth"]
}
-// Database error
+// Store a pattern with a relation
{
- created: [],
- count: 0,
- errors: [
- {
- name: "Entity Name",
- error: "Database unavailable"
- }
+ "name": "error-handling-pattern",
+ "type": "pattern",
+ "observations": ["All API errors return {error, code, message} format"],
+ "tags": ["project:myapp"],
+ "relations": [
+ {"to": "auth-decision", "type": "related-to"}
]
}
```
---
----
-
-## Learning & Automation Tools
+### recall
-### memesh-record-mistake
+Search and retrieve stored knowledge. Uses FTS5 full-text search with optional tag filtering. Call with no query to list recent memories.
-**Purpose**: Record AI mistakes for learning and prevention — enables systematic improvement from user feedback.
+**Input Schema**:
-**Aliases**: `buddy-record-mistake` (deprecated, will be removed in v3.0.0)
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `query` | string | No | Search query (FTS5 full-text search). Leave empty to list recent entities. |
+| `tag` | string | No | Filter by tag (e.g., `"project:myapp"`) |
+| `limit` | number | No | Max results (default: 20, max: 100) |
-**Use Cases**:
-- User explicitly corrects AI behavior or approach
-- Violated a documented procedure or guideline
-- Made incorrect assumptions instead of asking
-- Skipped validation step and caused problems
+**Response**:
-#### Input Schema
+Returns an array of matching entities:
```json
-{
- "type": "object",
- "properties": {
- "action": {
- "type": "string",
- "description": "What action the AI took (the mistake)"
- },
- "errorType": {
- "type": "string",
- "description": "Error classification",
- "enum": [
- "procedure-violation",
- "workflow-skip",
- "assumption-error",
- "validation-skip",
- "responsibility-lack",
- "firefighting",
- "dependency-miss",
- "integration-error",
- "deployment-error"
- ]
- },
- "userCorrection": {
- "type": "string",
- "description": "User's correction/feedback"
- },
- "correctMethod": {
- "type": "string",
- "description": "What should have been done instead"
- },
- "impact": {
- "type": "string",
- "description": "Impact of the mistake"
- },
- "preventionMethod": {
- "type": "string",
- "description": "How to prevent this in the future"
- },
- "relatedRule": {
- "type": "string",
- "description": "Related rule/guideline (optional)"
- },
- "context": {
- "type": "object",
- "description": "Additional context (optional)"
- }
- },
- "required": ["action", "errorType", "userCorrection", "correctMethod", "impact", "preventionMethod"]
-}
+[
+ {
+ "id": 1,
+ "name": "auth-decision",
+ "type": "decision",
+ "created_at": "2026-03-09 12:00:00",
+ "observations": [
+ "Chose JWT for authentication",
+ "Using RS256 algorithm for token signing"
+ ],
+ "tags": ["project:myapp", "topic:auth"],
+ "relations": [
+ {"from": "auth-decision", "to": "api-design", "type": "related-to"}
+ ]
+ }
+]
```
-#### Parameters
-
-| Parameter | Type | Required | Description | Example |
-|-----------|------|----------|-------------|---------|
-| `action` | string | Yes | What action the AI took | "Edited file without reading first" |
-| `errorType` | string | Yes | Error classification (see enum) | "procedure-violation" |
-| `userCorrection` | string | Yes | User's correction/feedback | "Must read file before editing" |
-| `correctMethod` | string | Yes | What should have been done | "Use Read tool first" |
-| `impact` | string | Yes | Impact of the mistake | "Broke file indentation" |
-| `preventionMethod` | string | Yes | How to prevent in future | "ALWAYS Read before Edit" |
-| `relatedRule` | string | No | Related rule/guideline | "READ_BEFORE_EDIT" |
-| `context` | object | No | Additional context | `{}` |
+**Examples**:
-#### Example
-
-Request:
```json
-{
- "action": "Edited ServerInitializer.ts without reading file first",
- "errorType": "procedure-violation",
- "userCorrection": "Must read file before editing - broke indentation",
- "correctMethod": "Use Read tool first to see exact content and formatting, then Edit",
- "impact": "Broke file indentation, required re-edit, wasted user time",
- "preventionMethod": "ALWAYS invoke Read tool before Edit tool - no exceptions",
- "relatedRule": "READ_BEFORE_EDIT (Anti-Hallucination Protocol)"
-}
-```
-
----
+// Search by keyword
+{"query": "authentication"}
-### memesh-hook-tool-use
+// Search with tag filter
+{"query": "auth", "tag": "project:myapp"}
-**Purpose**: Process tool execution events from Claude Code CLI for workflow automation.
+// List recent (no query)
+{}
-**Aliases**: `hook-tool-use` (deprecated, will be removed in v3.0.0)
-
-**Note**: This tool is auto-triggered by Claude Code hooks. Do not call manually.
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "toolName": {
- "type": "string",
- "description": "Tool name executed by Claude Code (e.g., 'Write', 'Edit', 'Bash')"
- },
- "arguments": {
- "type": "object",
- "description": "Tool arguments payload (tool-specific)"
- },
- "success": {
- "type": "boolean",
- "description": "Whether the tool execution succeeded"
- },
- "duration": {
- "type": "number",
- "description": "Execution duration in milliseconds (optional)"
- },
- "tokensUsed": {
- "type": "number",
- "description": "Tokens used by the tool call (optional)"
- },
- "output": {
- "type": "string",
- "description": "Tool output (optional)"
- }
- },
- "required": ["toolName", "success"]
-}
+// List recent with limit
+{"limit": 5}
```
-#### Parameters
-
-| Parameter | Type | Required | Description | Example |
-|-----------|------|----------|-------------|---------|
-| `toolName` | string | Yes | Tool executed by Claude Code | "Write" |
-| `success` | boolean | Yes | Whether execution succeeded | true |
-| `arguments` | object | No | Tool arguments payload | `{"file_path": "..."}` |
-| `duration` | number | No | Duration in milliseconds | 150 |
-| `tokensUsed` | number | No | Tokens used by tool call | 500 |
-| `output` | string | No | Tool output | "File written" |
-
---
-## System Tools
-
-### memesh-generate-tests
-
-**Purpose**: Generate automated test cases from specifications or code.
-
-**Use Cases**:
-- Create test suites for new features
-- Generate tests from requirements
-- Create unit tests from code
-- Build comprehensive test coverage
-- Accelerate test development
-
-#### Input Schema
-
-```json
-{
- "type": "object",
- "properties": {
- "specification": {
- "type": "string",
- "description": "Feature specification or requirements"
- },
- "code": {
- "type": "string",
- "description": "Source code to generate tests for"
- }
- }
-}
-```
-
-**Note**: Either `specification` or `code` must be provided (but not both).
-
-#### Parameters
+### forget
-| Parameter | Type | Required | Description | Example |
-|-----------|------|----------|-------------|---------|
-| `specification` | string | Conditional | Feature spec or requirements | "User can log in with email/password" |
-| `code` | string | Conditional | Source code to test | "function authenticate(user) { ... }" |
+Delete an entity and all its associated observations, relations, and tags.
-#### Response Format
-
-```typescript
-{
- testCode: string, // Generated test code
- message: string // Success message and instructions
-}
-```
+**Input Schema**:
-#### Examples
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `name` | string | Yes | Entity name to delete |
-**Example 1: Generate from Specification**
+**Response**:
-Request:
```json
-{
- "specification": "User authentication feature:\n- User can log in with email and password\n- Invalid credentials return error\n- Successful login returns JWT token\n- Token expires after 24 hours"
-}
-```
+// Entity found and deleted
+{"deleted": true, "name": "auth-decision"}
-Response:
-```json
-{
- "testCode": "describe('User Authentication', () => {\n it('should log in with valid credentials', async () => {\n const result = await authenticate({\n email: 'user@example.com',\n password: 'correctPassword'\n });\n expect(result.token).toBeDefined();\n expect(result.token).toMatch(/^eyJ/);\n });\n\n it('should reject invalid credentials', async () => {\n await expect(authenticate({\n email: 'user@example.com',\n password: 'wrongPassword'\n })).rejects.toThrow('Invalid credentials');\n });\n\n it('should return token that expires in 24 hours', async () => {\n const result = await authenticate(validCredentials);\n const decoded = jwt.decode(result.token);\n const expiresIn = decoded.exp - decoded.iat;\n expect(expiresIn).toBe(86400); // 24 hours in seconds\n });\n});",
- "message": "Test cases generated successfully. Review and adjust as needed."
-}
+// Entity not found
+{"deleted": false, "message": "Entity \"auth-decision\" not found"}
```
-**Example 2: Generate from Code**
-
-Request:
-```json
-{
- "code": "export function calculateDiscount(price: number, couponCode: string): number {\n if (price <= 0) throw new Error('Invalid price');\n \n const discounts: Record = {\n 'SAVE10': 0.1,\n 'SAVE20': 0.2,\n 'SAVE50': 0.5\n };\n \n const discount = discounts[couponCode] || 0;\n return price * (1 - discount);\n}"
-}
-```
+**Example**:
-Response:
```json
-{
- "testCode": "describe('calculateDiscount', () => {\n it('should apply 10% discount for SAVE10', () => {\n expect(calculateDiscount(100, 'SAVE10')).toBe(90);\n });\n\n it('should apply 20% discount for SAVE20', () => {\n expect(calculateDiscount(100, 'SAVE20')).toBe(80);\n });\n\n it('should apply 50% discount for SAVE50', () => {\n expect(calculateDiscount(100, 'SAVE50')).toBe(50);\n });\n\n it('should return original price for invalid coupon', () => {\n expect(calculateDiscount(100, 'INVALID')).toBe(100);\n });\n\n it('should throw error for negative price', () => {\n expect(() => calculateDiscount(-10, 'SAVE10')).toThrow('Invalid price');\n });\n\n it('should throw error for zero price', () => {\n expect(() => calculateDiscount(0, 'SAVE10')).toThrow('Invalid price');\n });\n});",
- "message": "Test cases generated successfully. Review and adjust as needed."
-}
+{"name": "auth-decision"}
```
-#### Test Generation Strategy
-
-MeMesh uses sampling to generate comprehensive test suites that cover:
-
-**Functional Testing**:
-- Happy path scenarios
-- Edge cases
-- Boundary conditions
-- Error handling
-
-**Coverage Goals**:
-- Input validation
-- Business logic
-- Error conditions
-- Return values
-
-#### Best Practices
-
-✅ **Do**:
-- Review generated tests before using
-- Adjust assertions to match your framework
-- Add additional edge cases as needed
-- Run tests to verify they work
-- Customize test descriptions
-
-❌ **Don't**:
-- Use generated tests without review
-- Assume 100% coverage
-- Skip manual test cases
-- Ignore failing generated tests
-
-#### Supported Test Frameworks
-
-Generated tests are framework-agnostic but follow common patterns compatible with:
-- Jest
-- Mocha/Chai
-- Vitest
-- Jasmine
+---
-Adjust imports and syntax as needed for your framework.
+## Data Model
-#### Error Responses
+### Entity
-```typescript
-// Missing required input
-{
- error: "Either specification or code must be provided",
- code: "VALIDATION_FAILED"
-}
+| Field | Type | Description |
+|-------|------|-------------|
+| `id` | number | Auto-incremented primary key |
+| `name` | string | Unique entity name |
+| `type` | string | Entity type |
+| `created_at` | string | ISO timestamp |
+| `metadata` | object | Optional JSON metadata |
+| `observations` | string[] | Associated observations |
+| `tags` | string[] | Associated tags |
+| `relations` | Relation[] | Outgoing relations (optional) |
-// Generation failed
-{
- error: "Test generation failed: unable to parse code",
- code: "OPERATION_FAILED"
-}
+### Relation
-// Both inputs provided
-{
- error: "Provide either specification or code, not both",
- code: "VALIDATION_FAILED"
-}
-```
+| Field | Type | Description |
+|-------|------|-------------|
+| `from` | string | Source entity name |
+| `to` | string | Target entity name |
+| `type` | string | Relation type |
+| `metadata` | object | Optional JSON metadata |
---
-### memesh-metrics
-
-**Purpose**: View MeMesh session metrics, routing configuration, and memory status.
+## Error Handling
-#### Input Schema
+All tools return errors in a standard format:
```json
{
- "type": "object",
- "properties": {
- "section": {
- "type": "string",
- "enum": ["all", "session", "routing", "memory"],
- "description": "Which metrics section to return (default: 'all')"
- }
- }
+ "content": [{"type": "text", "text": "error message"}],
+ "isError": true
}
```
-#### Parameters
-
-| Parameter | Type | Required | Default | Description | Example |
-|-----------|------|----------|---------|-------------|---------|
-| `section` | string | No | "all" | Which metrics section to return | "session" |
-
-#### Section Details
-
-- **all** (default): Returns everything below
-- **session**: Current session state — modified files, tested files, code review status
-- **routing**: Active model rules, planning enforcement, dry-run gate, recent audit log
-- **memory**: Knowledge graph size and status
-
-#### Example
-
-Request:
-```json
-{
- "section": "memory"
-}
-```
+Common errors:
+- Unknown tool name
+- Zod validation failure (missing required fields, invalid types)
+- Entity not found (for relations in `remember`)
---
-## Data Models
-
-### Entity Model
-
-```typescript
-interface Entity {
- id?: number; // Auto-generated ID
- name: string; // Unique entity name
- entityType: EntityType; // Type of entity
- observations: string[]; // Facts and notes
- tags?: string[]; // Searchable tags
- metadata?: Record; // Custom metadata
- createdAt?: Date; // Creation timestamp
-}
+## CLI Commands
-type EntityType =
- // Knowledge types
- | 'decision' // Architecture/technical decisions
- | 'bug_fix' // Bug fixes and root causes
- | 'feature' // Feature implementations
- | 'lesson_learned' // Lessons from incidents
- | 'best_practice' // Validated best practices
- | 'problem_solution' // Problem-solution pairs
- | 'technical_debt' // Technical debt items
- | 'optimization' // Performance optimizations
- | 'refactoring' // Refactoring decisions
- | 'learning_experience' // Learning patterns
- // Memory/tracking types
- | 'code_change' // Code change events
- | 'test_result' // Test execution results
- | 'session_snapshot' // Session state snapshots
- | 'project_snapshot' // Project state snapshots
- | 'workflow_checkpoint' // Workflow completions
- | 'commit' // Git commit events
- | 'prevention_rule' // Prevention rules
- | 'user_preference'; // User preferences
-```
+### memesh-view
-### Relation Model
+Generate and open an interactive HTML dashboard for exploring stored knowledge.
-```typescript
-interface Relation {
- id?: number; // Auto-generated ID
- from: string; // Source entity name
- to: string; // Target entity name
- relationType: RelationType; // Type of relationship
- metadata?: Record; // Custom metadata
- createdAt?: Date; // Creation timestamp
-}
+**Usage**:
-type RelationType =
- | 'caused_by' // A caused by B
- | 'enabled_by' // A enabled by B
- | 'follows_pattern' // A follows pattern from B
- | 'solves' // A solves B
- | 'replaced_by' // A replaced by B
- | 'depends_on' // A depends on B
- | 'similar_to' // A similar to B
- | 'evolved_from'; // A evolved from B
+```bash
+memesh-view
```
-### Memory Model
-
-```typescript
-interface Memory {
- type: string; // Entity type
- observations: string[]; // Array of observations
- timestamp?: string; // ISO 8601 timestamp
-}
-```
+**Behavior**:
-### Task Metadata Model
+1. Opens the MeMesh database (`~/.memesh/knowledge-graph.db`)
+2. Reads all entities, observations, relations, and tags
+3. Generates a self-contained HTML file with:
+ - **Knowledge graph** -- D3.js force-directed graph showing entities and relations
+ - **Entity table** -- Searchable, sortable table of all entities with observations and tags
+ - **Statistics** -- Total entities, observations, relations, and tags
+4. Opens the HTML file in the default browser
-```typescript
-interface TaskMetadata {
- goal: string; // Extracted task goal
- reason?: string; // Reason for task
- expectedOutcome?: string; // Expected result
-}
-```
+No arguments or options required. The dashboard is a static HTML file that can be shared or archived.
---
-## Error Reference
-
-### Error Codes
-
-| Code | Description | HTTP Equivalent | Common Causes |
-|------|-------------|-----------------|---------------|
-| `VALIDATION_FAILED` | Input validation error | 400 | Missing required fields, invalid types |
-| `OPERATION_FAILED` | Operation execution failed | 500 | Database error, system unavailable |
-| `RESOURCE_NOT_FOUND` | Resource not found | 404 | Entity doesn't exist |
-| `ROUTING_FAILED` | Task routing failed | 500 | No matching capability |
-| `TOOL_NOT_FOUND` | Tool doesn't exist | 404 | Invalid tool name |
-
-### Error Response Format
-
-All tools return errors in a consistent format:
-
-```typescript
-interface ErrorResponse {
- error: string; // Human-readable error message
- code: string; // Error code (see table above)
- details?: object; // Additional error details
-}
-```
-
-### Common Error Scenarios
-
-#### Validation Errors
+## Connection
-```typescript
-// Missing required field
-{
- error: "Validation failed: task is required",
- code: "VALIDATION_FAILED"
-}
-
-// Invalid type
-{
- error: "Validation failed: limit must be a number",
- code: "VALIDATION_FAILED"
-}
-
-// Out of range
-{
- error: "Validation failed: limit must be between 1 and 50",
- code: "VALIDATION_FAILED"
-}
-```
-
-#### Operation Errors
-
-```typescript
-// Database unavailable
-{
- error: "Database connection failed",
- code: "OPERATION_FAILED",
- details: {
- reason: "Connection timeout",
- path: "~/.memesh/database.db"
- }
-}
+MeMesh runs as a stdio MCP server. Claude Code manages the connection automatically via the plugin's `.mcp.json` configuration.
-// Permission denied
+```json
{
- error: "Cannot write to data directory",
- code: "OPERATION_FAILED",
- details: {
- path: "~/.memesh",
- permission: "denied"
+ "mcpServers": {
+ "memesh": {
+ "command": "node",
+ "args": ["${CLAUDE_PLUGIN_ROOT}/dist/mcp/server.js"],
+ "env": { "NODE_ENV": "production" }
+ }
}
}
```
-
-#### Resource Not Found
-
-```typescript
-// Entity not found
-{
- error: "Entity not found: Non-existent Entity",
- code: "RESOURCE_NOT_FOUND"
-}
-
-// Tool not found
-{
- error: "Tool not found: invalid-tool-name",
- code: "TOOL_NOT_FOUND"
-}
-```
-
-### Error Handling Best Practices
-
-✅ **Do**:
-- Always check error codes
-- Log errors with context
-- Provide helpful error messages to users
-- Retry transient errors (with backoff)
-- Handle partial success in batch operations
-
-❌ **Don't**:
-- Ignore error responses
-- Expose sensitive error details to users
-- Retry non-transient errors indefinitely
-- Assume success without checking status
-
----
-
-## Integration Examples
-
-### Example 1: Complete Feature Development Workflow
-
-```typescript
-// 1. Search for relevant context
-const memories = await buddyRemember({
- query: "authentication patterns",
- limit: 5
-});
-
-// 2. Route implementation task
-const task = await buddyDo({
- task: "implement user authentication with JWT because we need secure API access"
-});
-
-// 3. Create entity for implementation
-await createEntities({
- entities: [
- {
- name: "User Authentication Implementation 2026-02-03",
- entityType: "feature",
- observations: [
- "Implemented JWT-based authentication",
- "Uses bcrypt for password hashing",
- "Refresh tokens stored in database",
- "Access tokens expire after 15 minutes"
- ],
- tags: ["auth", "jwt", "security", "feature"],
- metadata: {
- author: "KT",
- files: ["src/auth/jwt.ts", "src/middleware/auth.ts"],
- priority: "high"
- }
- }
- ]
-});
-
-// 4. Link to decision
-await createRelations({
- relations: [
- {
- from: "User Authentication Implementation 2026-02-03",
- to: "JWT Library Decision",
- relationType: "enabled_by"
- }
- ]
-});
-
-// 5. Generate tests
-const tests = await generateTests({
- specification: "User authentication with JWT:\n- Login with email/password\n- Token refresh\n- Token validation"
-});
-
-// 6. Store test results
-await addObservations({
- observations: [
- {
- entityName: "User Authentication Implementation 2026-02-03",
- contents: [
- "All tests passing: 42/42",
- "Coverage: 95%",
- "Security audit passed"
- ]
- }
- ]
-});
-```
-
-### Example 2: Bug Investigation and Fix
-
-```typescript
-// 1. Search for similar issues
-const similarBugs = await buddyRemember({
- query: "session timeout issues",
- limit: 10
-});
-
-// 2. Investigate
-const investigation = await buddyDo({
- task: "investigate why sessions expire immediately after login"
-});
-
-// 3. Document bug
-await createEntities({
- entities: [
- {
- name: "Session Expiry Bug 2026-02-03",
- entityType: "bug_fix",
- observations: [
- "Bug: Sessions expire immediately after login",
- "Root cause: Cookie domain set incorrectly",
- "Affected: All users on production",
- "Discovered: User reports and monitoring"
- ],
- tags: ["bug", "session", "critical", "production"],
- metadata: {
- severity: "critical",
- affectedUsers: 1523,
- discoveredAt: "2026-02-03T08:00:00Z"
- }
- }
- ]
-});
-
-// 4. Document fix
-await createEntities({
- entities: [
- {
- name: "Session Config Fix 2026-02-03",
- entityType: "code_change",
- observations: [
- "Changed cookie domain from '.example.com' to null",
- "Updated session middleware configuration",
- "Added tests for cookie settings"
- ],
- tags: ["fix", "session", "configuration"]
- }
- ]
-});
-
-// 5. Link bug and fix
-await createRelations({
- relations: [
- {
- from: "Session Expiry Bug 2026-02-03",
- to: "Incorrect Cookie Domain Config",
- relationType: "caused_by"
- },
- {
- from: "Session Config Fix 2026-02-03",
- to: "Session Expiry Bug 2026-02-03",
- relationType: "solves"
- }
- ]
-});
-
-// 6. Update with resolution
-await addObservations({
- observations: [
- {
- entityName: "Session Expiry Bug 2026-02-03",
- contents: [
- "Fixed: 2026-02-03T10:30:00Z",
- "Verified: Sessions now persist for 24 hours",
- "Deployed: Production rollout completed",
- "Impact: Zero session expiry reports since fix"
- ]
- }
- ]
-});
-```
-
-### Example 3: Knowledge Graph Building
-
-```typescript
-// Build a comprehensive knowledge graph for a project
-
-// 1. Architecture decisions
-await createEntities({
- entities: [
- {
- name: "Microservices Architecture Decision",
- entityType: "decision",
- observations: [
- "Chose microservices over monolith",
- "Reasons: Independent scaling, team autonomy, tech diversity",
- "Trade-offs: Increased complexity, distributed debugging"
- ],
- tags: ["architecture", "microservices", "decision"]
- },
- {
- name: "PostgreSQL Database Choice",
- entityType: "decision",
- observations: [
- "Selected PostgreSQL for all services",
- "Reasons: JSON support, ACID compliance, mature ecosystem"
- ],
- tags: ["database", "postgresql", "decision"]
- }
- ]
-});
-
-// 2. Service implementations
-await createEntities({
- entities: [
- {
- name: "User Service",
- entityType: "feature",
- observations: [
- "Handles user authentication and profiles",
- "Built with Node.js and Express",
- "Uses PostgreSQL for persistence"
- ],
- tags: ["service", "backend", "user"]
- },
- {
- name: "Order Service",
- entityType: "feature",
- observations: [
- "Manages order lifecycle",
- "Event-driven architecture with Kafka",
- "PostgreSQL for order storage"
- ],
- tags: ["service", "backend", "orders"]
- }
- ]
-});
-
-// 3. Build relationships
-await createRelations({
- relations: [
- {
- from: "User Service",
- to: "Microservices Architecture Decision",
- relationType: "follows_pattern"
- },
- {
- from: "User Service",
- to: "PostgreSQL Database Choice",
- relationType: "depends_on"
- },
- {
- from: "Order Service",
- to: "Microservices Architecture Decision",
- relationType: "follows_pattern"
- },
- {
- from: "Order Service",
- to: "PostgreSQL Database Choice",
- relationType: "depends_on"
- },
- {
- from: "Order Service",
- to: "User Service",
- relationType: "depends_on",
- metadata: {
- reason: "Needs user information for orders"
- }
- }
- ]
-});
-
-// 4. Add best practices
-await createEntities({
- entities: [
- {
- name: "Service Communication Best Practice",
- entityType: "best_practice",
- observations: [
- "Always use async messaging for service-to-service communication",
- "Prefer events over direct API calls",
- "Implement circuit breakers for resilience"
- ],
- tags: ["best_practice", "microservices", "resilience"]
- }
- ]
-});
-
-// 5. Link best practices
-await createRelations({
- relations: [
- {
- from: "Order Service",
- to: "Service Communication Best Practice",
- relationType: "follows_pattern"
- }
- ]
-});
-```
-
----
-
-## Rate Limits & Performance
-
-### Performance Characteristics
-
-| Operation | Typical Time | Notes |
-|-----------|-------------|-------|
-| `buddy-do` | 100-2000ms | Depends on task complexity |
-| `buddy-remember` (store) | 50-200ms | Simple database insert |
-| `buddy-remember` (search) | 100-500ms | Depends on graph size |
-| `memesh-create-entities` | 50-200ms per entity | Batch operations faster |
-| `memesh-record-mistake` | 50-200ms | Database insert |
-| `memesh-hook-tool-use` | 10-100ms | Event processing |
-| `memesh-generate-tests` | 1000-5000ms | Uses LLM sampling |
-| `memesh-metrics` | 10-100ms | Read-only aggregation |
-
-### Rate Limits
-
-**MCP Protocol Limits**:
-- No hard rate limits (local server)
-- Performance scales with system resources
-- Database operations are sequential
-
-**Best Practices**:
-- Batch entity creation when possible
-- Cache buddy-remember results when appropriate
-- Use appropriate query limits
-
-### Resource Usage
-
-**Memory**:
-- Base server: ~50-100MB
-- Knowledge graph: Grows with data (~1-10MB typical)
-- Peak usage during memesh-generate-tests: ~200-300MB
-
-**Storage**:
-- Database file: `~/.memesh/database.db`
-- Grows with entities and relations
-- Typical project: 1-50MB
-- Use SQLite VACUUM to optimize
-
-**CPU**:
-- Most operations: Low CPU usage
-- memesh-generate-tests: Medium CPU (LLM sampling)
-- memesh-metrics: Minimal CPU
-
-### Optimization Tips
-
-✅ **Optimize Performance**:
-- Batch memesh-create-entities operations
-- Use appropriate limit parameters
-- Cache buddy-remember searches
-- Clean up old entities periodically
-
-✅ **Monitor Usage**:
-- Use memesh-metrics to check memory status
-- Monitor database file size
-- Track operation latency
-- Review error rates
-
----
-
-## Appendix
-
-### Version History
-
-- **v2.6.6** (2026-02-03): Enhanced error handling, ErrorClassifier integration
-- **v2.6.5** (2026-02-02): Interactive tutorial, improved QUICK_START
-- **v2.6.4** (2026-02-01): Response formatting improvements
-- **v2.6.3** (2026-01-31): Interactive setup wizard
-- **v2.6.2** (2026-01-30): Auto-tracking and memory linking
-- **v2.6.0** (2026-01-28): Smart routing with capability matching
-
-### Related Documentation
-
-- **User Guide**: [USER_GUIDE.md](../USER_GUIDE.md) - Complete user documentation
-- **Best Practices**: [BEST_PRACTICES.md](../BEST_PRACTICES.md) - Effective workflows
-- **Quick Start**: [QUICK_START.md](../QUICK_START.md) - Getting started guide
-- **Troubleshooting**: [TROUBLESHOOTING.md](../TROUBLESHOOTING.md) - Common issues and fixes
-- **Commands**: [COMMANDS.md](../COMMANDS.md) - CLI command reference
-
-### Support
-
-**GitHub Repository**: https://github.com/PCIRCLE-AI/claude-code-buddy
-
-**Issues**: https://github.com/PCIRCLE-AI/claude-code-buddy/issues
-
-**Discussions**: https://github.com/PCIRCLE-AI/claude-code-buddy/discussions
-
----
-
-**MeMesh Plugin** - Persistent memory and context-aware task execution for Claude Code
-
-*For questions or issues, please visit the GitHub repository or open an issue.*
diff --git a/docs/images/dashboard-screenshot.png b/docs/images/dashboard-screenshot.png
new file mode 100644
index 00000000..4ad56611
Binary files /dev/null and b/docs/images/dashboard-screenshot.png differ
diff --git a/docs/index.html b/docs/index.html
deleted file mode 100644
index e0829098..00000000
--- a/docs/index.html
+++ /dev/null
@@ -1,348 +0,0 @@
-
-
-
-
-
- MeMesh Plugin — Searchable Project Memory for Claude Code
-
-
-
-
-
-
-
-
-
-
-
-
MeMesh Plugin
-
Memory, smart task analysis, and workflow automation for Claude Code
-
- npm install -g @pcircle/memesh
- click to copy
-
-
-
-
-
When projects grow, it gets hard to keep track of decisions across sessions. MeMesh gives your projects searchable memory, smarter task handling, and automated workflows — so every session picks up right where you left off.
-
-
-
What it does
-
-
-
🔍
-
Searchable memory
-
Find past decisions by meaning, not just keywords. Ask "why did we choose PostgreSQL?" and get answers
-
-
-
🧠
-
Smart task analysis
-
Analyzes your task, pulls in past context, and builds an enriched plan before executing
-
-
-
⚙️
-
Workflow automation
-
Session recaps, code review reminders, smart routing, and file tracking — all automatic
-
-
-
📝
-
Learn from mistakes
-
Record errors and fixes to build a knowledge base that prevents repeated mistakes
-
-
-
🔒
-
100% local
-
All data stays on your machine. Nothing uploaded
-
-
-
⚡
-
Zero config
-
Install and restart Claude Code. That's it
-
-
-
-
3 commands, that's all
-
-
- buddy-do "task"
- Run a task with memory context
-
-
- buddy-remember "topic"
- Search past decisions and context
-
-
- buddy-help
- Show available commands
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/eslint.config.js b/eslint.config.js
deleted file mode 100644
index 0ee5f8e6..00000000
--- a/eslint.config.js
+++ /dev/null
@@ -1,28 +0,0 @@
-import tsPlugin from "@typescript-eslint/eslint-plugin";
-import tsParser from "@typescript-eslint/parser";
-
-export default [
- {
- ignores: ["dist/**", "node_modules/**", "coverage/**"],
- },
- {
- files: ["src/**/*.ts", "tests/**/*.ts"],
- languageOptions: {
- parser: tsParser,
- parserOptions: {
- ecmaVersion: "latest",
- sourceType: "module",
- },
- },
- plugins: {
- "@typescript-eslint": tsPlugin,
- },
- rules: {
- ...tsPlugin.configs.recommended.rules,
- "@typescript-eslint/no-explicit-any": "off",
- "@typescript-eslint/no-unused-vars": "off",
- "@typescript-eslint/no-empty-object-type": "off",
- "@typescript-eslint/no-unsafe-function-type": "off",
- },
- },
-];
diff --git a/hooks/hooks.json b/hooks/hooks.json
index ffc65825..a3afb0f7 100644
--- a/hooks/hooks.json
+++ b/hooks/hooks.json
@@ -11,29 +11,9 @@
]
}
],
- "PreToolUse": [
- {
- "matcher": "*",
- "hooks": [
- {
- "type": "command",
- "command": "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/pre-tool-use.js"
- }
- ]
- }
- ],
"PostToolUse": [
{
- "matcher": "*",
- "hooks": [
- {
- "type": "command",
- "command": "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/post-tool-use.js"
- }
- ]
- },
- {
- "matcher": "*",
+ "matcher": "Bash",
"hooks": [
{
"type": "command",
@@ -41,26 +21,6 @@
}
]
}
- ],
- "Stop": [
- {
- "matcher": "*",
- "hooks": [
- {
- "type": "command",
- "command": "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/stop.js"
- }
- ]
- },
- {
- "matcher": "*",
- "hooks": [
- {
- "type": "command",
- "command": "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/subagent-stop.js"
- }
- ]
- }
]
}
}
diff --git a/package.json b/package.json
index 54312034..82f478de 100644
--- a/package.json
+++ b/package.json
@@ -1,66 +1,28 @@
{
"name": "@pcircle/memesh",
- "version": "2.10.1",
- "description": "MeMesh — Persistent memory plugin for Claude Code. Remembers architecture decisions, coding patterns, and project context across sessions.",
+ "version": "3.0.0-alpha.1",
+ "description": "MeMesh — Minimal persistent memory plugin for Claude Code.",
"main": "dist/index.js",
"type": "module",
"bin": {
- "memesh": "dist/mcp/server-bootstrap.js"
+ "memesh": "dist/mcp/server.js",
+ "memesh-view": "dist/cli/view.js"
},
"files": [
"dist/",
".mcp.json",
"hooks/hooks.json",
- "scripts/postinstall-new.js",
- "scripts/postinstall-lib.js",
- "scripts/health-check.js",
"scripts/hooks/",
- "scripts/skills/",
"plugin.json",
"README.md",
- "README.zh-TW.md",
"LICENSE"
],
"scripts": {
- "postinstall": "node scripts/postinstall-new.js",
- "dev": "tsx watch src/index.ts",
- "build": "tsc && npm run copy:resources && chmod +x dist/mcp/server-bootstrap.js && npm run prepare:plugin",
- "copy:resources": "mkdir -p dist/mcp/resources && cp -r src/mcp/resources/*.md dist/mcp/resources/",
- "build:plugin": "npm run build",
- "prepare:plugin": "node scripts/prepare-plugin.js",
- "start": "node dist/index.js",
+ "build": "tsc && chmod +x dist/mcp/server.js && chmod +x dist/cli/view.js && chmod +x scripts/hooks/*.js",
"test": "vitest",
- "test:coverage": "vitest --coverage",
- "test:e2e": "echo '⚠️ WARNING: Use test:e2e:safe instead to prevent system freeze!' && exit 1",
- "test:e2e:safe": "./scripts/e2e-resource-monitor.sh vitest run --config vitest.e2e.config.ts",
- "test:integration": "./scripts/e2e-resource-monitor.sh vitest run --config vitest.config.ts tests/**/*.test.ts",
- "lint": "eslint src --ext .ts",
- "format": "prettier --write 'src/**/*.ts'",
"typecheck": "tsc --noEmit",
- "cred": "tsx src/cli/index.ts cred",
- "mcp": "node dist/mcp/server-bootstrap.js",
- "docs": "typedoc",
- "docs:watch": "typedoc --watch",
- "verify:mcp": "./scripts/verify-mcp-stdio.sh",
- "test:install": "./scripts/test-installation.sh",
- "prepublishOnly": "npm run test:install",
- "processes:list": "./scripts/manage-mcp-processes.sh list",
- "processes:kill": "./scripts/manage-mcp-processes.sh kill",
- "processes:restart": "./scripts/manage-mcp-processes.sh restart",
- "processes:orphaned": "./scripts/manage-mcp-processes.sh orphaned",
- "processes:config": "./scripts/manage-mcp-processes.sh config",
- "verify:migration": "tsx scripts/verify-migration.ts",
- "backfill-embeddings": "tsx scripts/backfill-embeddings.ts"
+ "start": "node dist/mcp/server.js"
},
- "keywords": [
- "claude",
- "claude-code",
- "claude-code-plugin",
- "mcp",
- "ai-memory",
- "knowledge-graph",
- "persistent-memory"
- ],
"author": "PCIRCLE-AI",
"license": "MIT",
"homepage": "https://github.com/PCIRCLE-AI/claude-code-buddy",
@@ -68,62 +30,18 @@
"type": "git",
"url": "git+https://github.com/PCIRCLE-AI/claude-code-buddy.git"
},
- "bugs": {
- "url": "https://github.com/PCIRCLE-AI/claude-code-buddy/issues"
- },
"dependencies": {
- "@anthropic-ai/sdk": "^0.78.0",
- "@modelcontextprotocol/sdk": "^1.25.3",
- "@types/fs-extra": "^11.0.4",
- "@types/inquirer": "^9.0.9",
- "@xenova/transformers": "^2.17.2",
- "asciichart": "^1.5.25",
"better-sqlite3": "^12.6.2",
- "boxen": "^8.0.1",
- "chalk": "^5.6.2",
- "chokidar": "^5.0.0",
- "cli-spinners": "^3.4.0",
- "cli-table3": "^0.6.5",
- "commander": "^14.0.2",
- "dotenv": "^17.2.3",
- "express": "^5.2.1",
- "fs-extra": "^11.3.3",
- "glob": "^13.0.0",
- "inquirer": "^13.2.2",
- "minimatch": "^10.1.1",
- "onnxruntime-node": "^1.23.2",
- "ora": "^9.0.0",
- "sqlite": "^5.1.1",
- "sqlite-vec": "0.1.3",
- "uuid": "^13.0.0",
- "winston": "^3.19.0",
+ "@modelcontextprotocol/sdk": "^1.25.3",
"zod": "^4.3.5"
},
"devDependencies": {
- "@types/asciichart": "^1.5.8",
"@types/better-sqlite3": "^7.6.13",
- "@types/express": "^5.0.6",
"@types/node": "^25.0.9",
- "@types/supertest": "^6.0.3",
- "@types/uuid": "^11.0.0",
- "@types/winston": "^2.4.4",
- "@typescript-eslint/eslint-plugin": "^8.56.1",
- "@typescript-eslint/parser": "^8.56.1",
- "@vitest/coverage-v8": "^4.0.17",
- "ajv": "^8.17.1",
- "eslint": "^10.0.2",
- "prettier": "^3.8.0",
- "supertest": "^7.2.2",
- "tsx": "^4.19.2",
- "typedoc": "^0.28.16",
"typescript": "^5.9.3",
"vitest": "^4.0.17"
},
- "overrides": {
- "hono": "4.11.10"
- },
"engines": {
- "node": ">=20.0.0",
- "npm": ">=9.0.0"
+ "node": ">=20.0.0"
}
}
diff --git a/plugin.json b/plugin.json
index 708cc0a0..b163fa22 100644
--- a/plugin.json
+++ b/plugin.json
@@ -1,15 +1,12 @@
{
"name": "memesh",
- "description": "MeMesh Plugin — Persistent memory plugin for Claude Code. Remembers architecture decisions, coding patterns, and project context across sessions.",
- "author": {
- "name": "PCIRCLE AI"
- },
- "version": "2.10.1",
+ "description": "MeMesh — Minimal persistent memory plugin for Claude Code.",
+ "author": { "name": "PCIRCLE AI" },
+ "version": "3.0.0-alpha.1",
"homepage": "https://github.com/PCIRCLE-AI/claude-code-buddy",
"repository": "https://github.com/PCIRCLE-AI/claude-code-buddy",
"license": "MIT",
- "keywords": ["claude-code", "mcp", "knowledge-graph", "ai-memory", "persistent-memory"],
+ "keywords": ["claude-code", "mcp", "knowledge-graph", "ai-memory"],
"mcpServers": "./.mcp.json",
- "hooks": "./hooks/hooks.json",
- "skills": "./skills/"
+ "hooks": "./hooks/hooks.json"
}
diff --git a/scripts/backfill-embeddings.ts b/scripts/backfill-embeddings.ts
deleted file mode 100644
index 190d3649..00000000
--- a/scripts/backfill-embeddings.ts
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env tsx
-/**
- * Backfill embeddings for existing entities
- *
- * This script generates embeddings for entities that don't have one yet.
- * It processes entities in batches to manage memory and provides progress reporting.
- *
- * Usage:
- * npx tsx scripts/backfill-embeddings.ts
- * npx tsx scripts/backfill-embeddings.ts --batch-size 50
- * npx tsx scripts/backfill-embeddings.ts --dry-run
- * npx tsx scripts/backfill-embeddings.ts --verbose
- */
-
-import { KnowledgeGraphSQLite } from '../src/knowledge-graph/index.js';
-import { LazyEmbeddingService } from '../src/embeddings/index.js';
-
-interface BackfillOptions {
- batchSize: number;
- dryRun: boolean;
- verbose: boolean;
-}
-
-async function backfillEmbeddings(options: BackfillOptions): Promise {
- const { batchSize, dryRun, verbose } = options;
-
- console.log('🚀 Starting embedding backfill...');
- console.log(` Batch size: ${batchSize}`);
- console.log(` Dry run: ${dryRun}`);
- console.log('');
-
- // Initialize knowledge graph
- const kg = new KnowledgeGraphSQLite();
- await kg.initialize();
-
- // Get entities without embeddings
- const entitiesWithoutEmbedding = await kg.getEntitiesWithoutEmbeddings();
- const total = entitiesWithoutEmbedding.length;
-
- // Show current stats
- const stats = kg.getEmbeddingStats();
- console.log('📊 Current embedding stats:');
- console.log(` With embeddings: ${stats.withEmbeddings}`);
- console.log(` Without embeddings: ${stats.withoutEmbeddings}`);
- console.log(` Total entities: ${stats.total}`);
- console.log('');
-
- if (total === 0) {
- console.log('✅ All entities already have embeddings!');
- return;
- }
-
- if (dryRun) {
- console.log('🔍 Dry run - entities that would be processed:');
- for (const entity of entitiesWithoutEmbedding.slice(0, 10)) {
- console.log(` - ${entity.name} (${entity.entityType})`);
- }
- if (total > 10) {
- console.log(` ... and ${total - 10} more`);
- }
- return;
- }
-
- // Initialize embedding service
- console.log('⏳ Loading embedding model...');
- const embeddingService = await LazyEmbeddingService.get();
- console.log('✅ Model loaded\n');
-
- // Process in batches
- let processed = 0;
- let failed = 0;
- const startTime = Date.now();
-
- for (let i = 0; i < total; i += batchSize) {
- const batch = entitiesWithoutEmbedding.slice(i, i + batchSize);
-
- for (const entity of batch) {
- try {
- // Create text from name + observations
- const textParts = [entity.name];
- if (entity.observations?.length) {
- textParts.push(...entity.observations);
- }
- const text = textParts.join(' ');
-
- // Generate embedding
- const embedding = await embeddingService.encode(text);
-
- // Store embedding
- kg.updateEntityEmbedding(entity.name, embedding);
-
- processed++;
-
- if (verbose) {
- console.log(`✓ ${entity.name}`);
- }
- } catch (error) {
- failed++;
- console.error(
- `✗ Failed: ${entity.name} - ${error instanceof Error ? error.message : error}`
- );
- }
- }
-
- // Progress update
- const progress = (((i + batch.length) / total) * 100).toFixed(1);
- const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
- process.stdout.write(
- `\r📈 Progress: ${progress}% (${processed}/${total}) - ${elapsed}s elapsed`
- );
- }
-
- console.log('\n');
- console.log('═'.repeat(50));
- console.log('📊 Backfill Complete');
- console.log('═'.repeat(50));
- console.log(` Total entities: ${total}`);
- console.log(` ✅ Processed: ${processed}`);
- console.log(` ❌ Failed: ${failed}`);
- console.log(` ⏱️ Duration: ${((Date.now() - startTime) / 1000).toFixed(1)}s`);
- console.log('═'.repeat(50));
-
- // Cleanup
- await LazyEmbeddingService.dispose();
-}
-
-// Parse command line arguments
-function parseArgs(): BackfillOptions {
- const args = process.argv.slice(2);
-
- return {
- batchSize: parseInt(args.find((a) => a.startsWith('--batch-size='))?.split('=')[1] || '20'),
- dryRun: args.includes('--dry-run'),
- verbose: args.includes('--verbose') || args.includes('-v'),
- };
-}
-
-// Main
-const options = parseArgs();
-backfillEmbeddings(options).catch((error) => {
- console.error('Fatal error:', error);
- process.exit(1);
-});
diff --git a/scripts/check-system-resources.js b/scripts/check-system-resources.js
deleted file mode 100644
index 400d382b..00000000
--- a/scripts/check-system-resources.js
+++ /dev/null
@@ -1,29 +0,0 @@
-import os from "os";
-
-const bytesToGb = (bytes) => (bytes / 1024 / 1024 / 1024).toFixed(1);
-
-const totalMem = os.totalmem();
-const freeMem = os.freemem();
-const cpuCount = os.cpus().length;
-const load = os.loadavg().map((value) => value.toFixed(2)).join(", ");
-
-console.log("System resources:");
-console.log(`- CPUs: ${cpuCount}`);
-console.log(`- Memory: ${bytesToGb(freeMem)} GB free / ${bytesToGb(totalMem)} GB total`);
-console.log(`- Load avg (1m, 5m, 15m): ${load}`);
-
-const warnings = [];
-if (totalMem < 4 * 1024 * 1024 * 1024) {
- warnings.push("Recommended memory is 4 GB or more for best performance.");
-}
-if (freeMem < 1024 * 1024 * 1024) {
- warnings.push("Low free memory detected. Close unused apps if you see slowdowns.");
-}
-
-if (warnings.length > 0) {
- console.log("");
- console.log("Resource notes:");
- for (const warning of warnings) {
- console.log(`- ${warning}`);
- }
-}
diff --git a/scripts/cleanup-orphan-processes.sh b/scripts/cleanup-orphan-processes.sh
deleted file mode 100755
index 07c3bfc4..00000000
--- a/scripts/cleanup-orphan-processes.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-#
-# Cleanup Orphan MeMesh Processes
-# Runs on SessionStart to prevent MCP connection failures
-#
-
-# Check for orphan memesh processes
-ORPHANS=$(ps aux | grep "server-bootstrap.js" | grep -v grep | wc -l | tr -d ' ')
-
-if [ "$ORPHANS" -gt 0 ]; then
- echo "⚠️ Found $ORPHANS orphan memesh process(es), cleaning up..."
- pkill -f "server-bootstrap.js"
- sleep 1
-
- # Verify cleanup
- REMAINING=$(ps aux | grep "server-bootstrap.js" | grep -v grep | wc -l | tr -d ' ')
- if [ "$REMAINING" -eq 0 ]; then
- echo "✅ Cleaned up orphan processes"
- else
- echo "❌ Failed to clean up all orphan processes"
- fi
-else
- # Silent if no orphans (don't clutter output)
- true
-fi
diff --git a/scripts/e2e-resource-monitor.sh b/scripts/e2e-resource-monitor.sh
deleted file mode 100755
index 1aeae259..00000000
--- a/scripts/e2e-resource-monitor.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-if [ "$#" -eq 0 ]; then
- echo "Usage: $0 [args...]"
- exit 1
-fi
-
-MAX_RSS_MB=${MAX_RSS_MB:-4096}
-MAX_CPU_PERCENT=${MAX_CPU_PERCENT:-400}
-MAX_DURATION_SEC=${MAX_DURATION_SEC:-1800}
-POLL_INTERVAL_SEC=${POLL_INTERVAL_SEC:-2}
-
-COMMAND=("$@")
-START_TS=$(date +%s)
-KILLED_BY_MONITOR=0
-
-cleanup() {
- if kill -0 "$PID" 2>/dev/null; then
- kill "$PID" 2>/dev/null || true
- fi
-}
-
-"${COMMAND[@]}" &
-PID=$!
-trap cleanup INT TERM
-
-while kill -0 "$PID" 2>/dev/null; do
- ELAPSED=$(( $(date +%s) - START_TS ))
- if [ "$ELAPSED" -gt "$MAX_DURATION_SEC" ]; then
- echo "[monitor] Max duration exceeded (${MAX_DURATION_SEC}s). Terminating PID $PID."
- KILLED_BY_MONITOR=1
- kill "$PID" 2>/dev/null || true
- break
- fi
-
- PS_OUT=$(ps -o %cpu= -o rss= -p "$PID" 2>/dev/null || true)
- if [ -n "$PS_OUT" ]; then
- CPU=$(echo "$PS_OUT" | awk '{print int($1)}')
- RSS_KB=$(echo "$PS_OUT" | awk '{print int($2)}')
- RSS_MB=$(( RSS_KB / 1024 ))
-
- if [ "$RSS_MB" -gt "$MAX_RSS_MB" ]; then
- echo "[monitor] RSS ${RSS_MB}MB exceeded limit ${MAX_RSS_MB}MB. Terminating PID $PID."
- KILLED_BY_MONITOR=1
- kill "$PID" 2>/dev/null || true
- break
- fi
-
- if [ "$CPU" -gt "$MAX_CPU_PERCENT" ]; then
- echo "[monitor] CPU ${CPU}% exceeded limit ${MAX_CPU_PERCENT}%. Terminating PID $PID."
- KILLED_BY_MONITOR=1
- kill "$PID" 2>/dev/null || true
- break
- fi
- fi
-
- sleep "$POLL_INTERVAL_SEC"
-done
-
-wait "$PID" || EXIT_CODE=$?
-EXIT_CODE=${EXIT_CODE:-0}
-
-if [ "$KILLED_BY_MONITOR" -eq 1 ]; then
- echo "[monitor] Test run stopped by resource monitor."
- exit 1
-fi
-
-exit "$EXIT_CODE"
diff --git a/scripts/health-check.js b/scripts/health-check.js
deleted file mode 100644
index 4e31e5ad..00000000
--- a/scripts/health-check.js
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * MeMesh Plugin Health Check
- *
- * Fast, non-invasive validation of plugin installation.
- * Supports both npm global install and local dev install.
- *
- * Exit codes:
- * 0 - All healthy
- * 1 - Repairable issues found
- * 2 - Fatal error (requires manual intervention)
- */
-
-import { existsSync, readFileSync, lstatSync, realpathSync } from 'fs';
-import { join, dirname } from 'path';
-import { homedir } from 'os';
-import { fileURLToPath } from 'url';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const projectRoot = join(__dirname, '..');
-
-// Parse CLI flags
-const silent = process.argv.includes('--silent');
-const verbose = process.argv.includes('--verbose');
-const json = process.argv.includes('--json');
-
-/**
- * Detect installation mode based on directory structure.
- * - 'npm-global': installed via npm install -g (dist/ is in package root)
- * - 'dev': running from project source (needs .claude-plugin/memesh/dist/)
- * - 'plugin': installed via /plugin marketplace add (managed by Claude Code)
- */
-function detectInstallMode() {
- // If dist/mcp/server-bootstrap.js exists at package root, it's npm or plugin install
- if (existsSync(join(projectRoot, 'dist', 'mcp', 'server-bootstrap.js'))) {
- // Check if we're inside node_modules (npm global)
- if (projectRoot.includes('node_modules')) {
- return 'npm-global';
- }
- // Check if src/ exists (dev environment)
- if (existsSync(join(projectRoot, 'src'))) {
- return 'dev';
- }
- return 'plugin';
- }
- // Fallback: dev mode without build
- return 'dev';
-}
-
-const installMode = detectInstallMode();
-
-/**
- * Health check result structure
- */
-const result = {
- healthy: true,
- installMode,
- issues: [],
- timestamp: new Date().toISOString(),
- checks: {
- dist: false,
- pluginJson: false,
- mcpJson: false,
- hooks: false,
- marketplace: false,
- symlink: false,
- settings: false,
- }
-};
-
-/**
- * Add an issue to the result
- */
-function addIssue(path, severity, message, repairable = true) {
- result.issues.push({ path, severity, message, repairable });
- result.healthy = false;
- if (!silent && !json) {
- const icon = severity === 'error' ? '❌' : '⚠️';
- console.error(` ${icon} ${path}: ${message}`);
- }
-}
-
-/**
- * Log success message
- */
-function logSuccess(message) {
- if (!silent && !json) {
- if (verbose) {
- console.log(` ✅ ${message}`);
- }
- }
-}
-
-// ============================================================================
-// Start
-// ============================================================================
-
-if (!silent && !json) {
- console.log('🔍 Checking MeMesh Plugin installation...\n');
- console.log(` Mode: ${installMode}`);
- console.log(` Path: ${projectRoot}\n`);
-}
-
-// ============================================================================
-// Check 1: Server bootstrap exists
-// ============================================================================
-
-const serverPath = installMode === 'dev'
- ? join(projectRoot, '.claude-plugin', 'memesh', 'dist', 'mcp', 'server-bootstrap.js')
- : join(projectRoot, 'dist', 'mcp', 'server-bootstrap.js');
-
-if (!existsSync(serverPath)) {
- const hint = installMode === 'dev' ? 'Run: npm run build' : 'Reinstall: npm install -g @pcircle/memesh';
- addIssue('dist', 'error', `server-bootstrap.js not found at ${serverPath}`, false);
-
- if (!silent && !json) {
- console.error(`\n❌ Plugin not built. ${hint}\n`);
- }
-
- if (json) {
- console.log(JSON.stringify(result, null, 2));
- }
-
- process.exit(2);
-} else {
- result.checks.dist = true;
- logSuccess('server-bootstrap.js exists');
-}
-
-// ============================================================================
-// Check 2: plugin.json exists and is valid
-// ============================================================================
-
-const pluginJsonPath = join(projectRoot, 'plugin.json');
-
-try {
- if (!existsSync(pluginJsonPath)) {
- addIssue('pluginJson', 'error', 'plugin.json not found');
- } else {
- const plugin = JSON.parse(readFileSync(pluginJsonPath, 'utf-8'));
- if (!plugin.name || !plugin.version) {
- addIssue('pluginJson', 'error', 'plugin.json missing name or version');
- } else {
- result.checks.pluginJson = true;
- logSuccess(`plugin.json valid (${plugin.name} v${plugin.version})`);
- }
- }
-} catch (error) {
- addIssue('pluginJson', 'error', `Failed to parse plugin.json: ${error.message}`);
-}
-
-// ============================================================================
-// Check 3: .mcp.json exists and is valid
-// ============================================================================
-
-const mcpJsonPath = join(projectRoot, '.mcp.json');
-
-try {
- if (!existsSync(mcpJsonPath)) {
- addIssue('mcpJson', 'error', '.mcp.json not found');
- } else {
- const mcp = JSON.parse(readFileSync(mcpJsonPath, 'utf-8'));
- if (!mcp.mcpServers?.memesh) {
- addIssue('mcpJson', 'error', '.mcp.json missing memesh server definition');
- } else {
- result.checks.mcpJson = true;
- logSuccess('.mcp.json valid');
- }
- }
-} catch (error) {
- addIssue('mcpJson', 'error', `Failed to parse .mcp.json: ${error.message}`);
-}
-
-// ============================================================================
-// Check 4: hooks/hooks.json exists and scripts are present
-// ============================================================================
-
-const hooksJsonPath = join(projectRoot, 'hooks', 'hooks.json');
-
-try {
- if (!existsSync(hooksJsonPath)) {
- addIssue('hooks', 'error', 'hooks/hooks.json not found');
- } else {
- const hooksConfig = JSON.parse(readFileSync(hooksJsonPath, 'utf-8'));
- const events = Object.keys(hooksConfig.hooks || {});
-
- if (events.length === 0) {
- addIssue('hooks', 'warning', 'No hook events defined');
- } else {
- let allScriptsExist = true;
- for (const event of events) {
- for (const entry of hooksConfig.hooks[event]) {
- for (const hook of entry.hooks) {
- const scriptPath = hook.command.replace('${CLAUDE_PLUGIN_ROOT}', projectRoot);
- if (!existsSync(scriptPath)) {
- addIssue('hooks', 'error', `Hook script missing: ${scriptPath}`);
- allScriptsExist = false;
- }
- }
- }
- }
- if (allScriptsExist) {
- result.checks.hooks = true;
- logSuccess(`hooks valid (${events.length} events)`);
- }
- }
- }
-} catch (error) {
- addIssue('hooks', 'error', `Failed to parse hooks.json: ${error.message}`);
-}
-
-// ============================================================================
-// Check 5: Marketplace registration (npm-global and dev modes)
-// ============================================================================
-
-const knownMarketplacesPath = join(homedir(), '.claude', 'plugins', 'known_marketplaces.json');
-
-try {
- if (!existsSync(knownMarketplacesPath)) {
- addIssue('marketplace', 'warning', 'known_marketplaces.json not found (plugin may be installed via /plugin command)');
- } else {
- const content = readFileSync(knownMarketplacesPath, 'utf-8');
- const marketplaces = JSON.parse(content);
-
- if (marketplaces['pcircle-ai']) {
- result.checks.marketplace = true;
- logSuccess('Marketplace registered');
- } else {
- addIssue('marketplace', 'warning', 'pcircle-ai not in known_marketplaces.json (may be installed via /plugin command)');
- }
- }
-} catch (error) {
- addIssue('marketplace', 'error', `Failed to check marketplace: ${error.message}`);
-}
-
-// ============================================================================
-// Check 6: Symlink or plugin discovery
-// ============================================================================
-
-const symlinkPath = join(homedir(), '.claude', 'plugins', 'marketplaces', 'pcircle-ai');
-
-try {
- if (!existsSync(symlinkPath)) {
- addIssue('symlink', 'warning', 'Marketplace symlink not found (may be installed via /plugin command)');
- } else {
- const stats = lstatSync(symlinkPath);
-
- if (stats.isSymbolicLink()) {
- const target = realpathSync(symlinkPath);
- if (!existsSync(target)) {
- addIssue('symlink', 'error', 'Symlink target does not exist (broken symlink)');
- } else {
- result.checks.symlink = true;
- logSuccess(`Symlink valid → ${target}`);
- }
- } else if (stats.isDirectory()) {
- // Could be a direct clone (plugin marketplace install)
- result.checks.symlink = true;
- logSuccess('Plugin directory exists (marketplace install)');
- } else {
- addIssue('symlink', 'error', 'Marketplace path is not a symlink or directory');
- }
- }
-} catch (error) {
- addIssue('symlink', 'error', `Failed to check symlink: ${error.message}`);
-}
-
-// ============================================================================
-// Check 7: Plugin enabled in settings
-// ============================================================================
-
-const settingsPath = join(homedir(), '.claude', 'settings.json');
-
-try {
- if (!existsSync(settingsPath)) {
- addIssue('settings', 'warning', 'settings.json not found');
- } else {
- const content = readFileSync(settingsPath, 'utf-8');
- const settings = JSON.parse(content);
-
- if (!settings.enabledPlugins) {
- addIssue('settings', 'warning', 'enabledPlugins not found in settings.json');
- } else if (!settings.enabledPlugins['memesh@pcircle-ai']) {
- addIssue('settings', 'warning', 'memesh@pcircle-ai not enabled (may need to enable via /plugin command)');
- } else if (settings.enabledPlugins['memesh@pcircle-ai'] !== true) {
- addIssue('settings', 'warning', 'memesh@pcircle-ai is disabled');
- } else {
- result.checks.settings = true;
- logSuccess('Plugin enabled in settings');
- }
- }
-} catch (error) {
- addIssue('settings', 'error', `Failed to check settings: ${error.message}`);
-}
-
-// ============================================================================
-// Summary
-// ============================================================================
-
-if (json) {
- console.log(JSON.stringify(result, null, 2));
-} else if (!silent) {
- console.log('\n' + '═'.repeat(60));
-
- if (result.healthy) {
- console.log('✅ All checks passed - plugin installation healthy');
- console.log('═'.repeat(60));
- } else {
- const errors = result.issues.filter(i => i.severity === 'error');
- const warnings = result.issues.filter(i => i.severity === 'warning');
-
- if (errors.length > 0) {
- console.log(`❌ Found ${errors.length} error(s), ${warnings.length} warning(s)`);
- } else {
- console.log(`⚠️ Found ${warnings.length} warning(s) (non-critical)`);
- }
- console.log('═'.repeat(60));
-
- if (errors.length > 0) {
- const hint = installMode === 'dev' ? 'npm run build' : 'npm install -g @pcircle/memesh';
- console.log(`\n🔧 Fix errors first. Try: ${hint}\n`);
- }
- }
-}
-
-// Exit: errors → 1 or 2, warnings only → 0
-const hasErrors = result.issues.some(i => i.severity === 'error');
-const hasUnrepairableErrors = result.issues.some(i => i.severity === 'error' && !i.repairable);
-process.exit(hasErrors ? (hasUnrepairableErrors ? 2 : 1) : 0);
diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md
deleted file mode 100644
index e5f8a47f..00000000
--- a/scripts/hooks/README.md
+++ /dev/null
@@ -1,230 +0,0 @@
-# MeMesh Hooks for Claude Code
-
-**What are these?** Scripts that run automatically when you use Claude Code. They provide memory management, smart routing, code quality enforcement, and planning assistance.
-
-## What They Do
-
-| When | What Happens |
-|------|--------------|
-| **You open Claude Code** | Reloads CLAUDE.md, shows last session recap (cache-first) |
-| **Before a tool runs** | Smart routing, planning template injection, dry-run gate, code review reminder |
-| **After a tool runs** | Tracks work patterns, file modifications, test executions |
-| **You make a git commit** | Saves commit context to knowledge graph (batched) |
-| **A subagent finishes** | Saves code review results, tracks completion |
-| **You close Claude Code** | Saves session summary + cache for fast next startup |
-
-## Installation
-
-```bash
-# Copy hooks to Claude Code
-cp scripts/hooks/*.js ~/.claude/hooks/
-cp -r scripts/hooks/templates/ ~/.claude/hooks/templates/
-chmod +x ~/.claude/hooks/*.js
-```
-
-**Done!** Restart Claude Code to activate.
-
----
-
-## Features
-
-### Smart Router (PreToolUse)
-
-Routes subagent tasks to optimal models and controls background execution.
-
-```
-Task(Explore) → model: haiku (fast search)
-Task(Plan) → inject SDD+BDD planning template
-Task(heavy) → check for untested code, warn if found
-```
-
-Configuration: `~/.memesh/routing-config.json`
-
-```json
-{
- "modelRouting": {
- "rules": [
- { "subagentType": "Explore", "model": "haiku", "reason": "Fast search" }
- ]
- },
- "backgroundRules": [
- { "subagentType": "Explore", "forceBackground": false }
- ],
- "planningEnforcement": { "enabled": true },
- "dryRunGate": { "enabled": true },
- "auditLog": true
-}
-```
-
-Audit log: `~/.memesh/routing-audit.log`
-
-### Planning Enforcement
-
-When a Plan subagent is dispatched, the hook injects a template requiring:
-- System Design Description (SDD)
-- Behavior-Driven Design (BDD) with Gherkin scenarios
-- Edge case handling table
-- Dry-run test plan
-- Risk assessment
-
-The plan is always presented to the user for approval before implementation.
-
-### Dry-Run Gate
-
-Tracks which files were modified (Write/Edit) and which were tested
-(vitest/jest/tsc/node --check). Before heavy Task dispatches, warns if
-modified files haven't been tested yet.
-
-**Advisory only** — never blocks, just informs.
-
-### Pre-Commit Code Review
-
-```
-git commit detected → Code review done? → Yes → Allow
- → No → Inject reminder
-```
-
-### Auto-Memory (Batched)
-
-```
-Open Claude Code → Work normally → Git commit → Close Claude Code
- ↓ ↓ ↓ ↓
-Cache-first recall Track patterns Batch save to KG Cache + archive
-(0 SQLite spawns) (async writes) (2 spawns vs 8)
-```
-
-### What Gets Tracked
-
-| Symbol | Meaning |
-|--------|---------|
-| 📁 | Files you changed |
-| ✅ | Git commits you made |
-| 💡 | Things you learned |
-| ⚠️ | Problems you ran into |
-| 🎯 | Decisions you made |
-| 🔍 | Code review findings |
-
----
-
-## Troubleshooting
-
-### "Hooks not working"
-
-```bash
-ls ~/.claude/hooks/
-cp scripts/hooks/*.js ~/.claude/hooks/
-```
-
-### "No memory showing"
-
-```bash
-ls ~/.memesh/knowledge-graph.db
-```
-
-### "Routing not applying"
-
-```bash
-# Check config
-cat ~/.memesh/routing-config.json
-
-# Check audit log
-tail -20 ~/.memesh/routing-audit.log
-```
-
-## Limitations
-
-| What | Details |
-|------|---------|
-| **Claude Code only** | Doesn't work in Cursor |
-| **30-day memory** | Old session memories auto-deleted |
-| **Local only** | No sync between computers |
-| **Advisory gates** | Dry-run and review are reminders, not blockers |
-
----
-
-## Files
-
-```
-scripts/hooks/
-├── session-start.js ← SessionStart: reload CLAUDE.md, cache-first recall
-├── pre-tool-use.js ← PreToolUse: handler registry (4 handlers)
-│ ├── codeReviewHandler — git commit review enforcement
-│ ├── routingHandler — model/background selection
-│ ├── planningHandler — SDD+BDD template injection
-│ └── dryRunGateHandler — untested code warning
-├── post-tool-use.js ← PostToolUse: patterns, file/test tracking, async writes
-├── post-commit.js ← PostToolUse: batch save commit to KG
-├── subagent-stop.js ← SubagentStop: capture code review results
-├── stop.js ← Stop: batch save, cache, archive, cleanup
-├── hook-utils.js ← Shared: sqliteBatch, async I/O, constants
-├── templates/
-│ └── planning-template.md ← SDD+BDD+edge case template
-└── __tests__/
- ├── hook-test-harness.js ← Test runner (no Claude Code needed)
- └── hooks.test.js ← 15 test cases
-```
-
-### Handler Flow (PreToolUse)
-
-```
-PreToolUse event
- ↓
-┌──────────────────────┐
-│ Handler Registry │
-│ ├─ codeReview │ → additionalContext (review reminder)
-│ ├─ routing │ → updatedInput (model, background)
-│ ├─ planning │ → updatedInput.prompt (template)
-│ └─ dryRunGate │ → additionalContext (untested warning)
-└──────────────────────┘
- ↓
-┌──────────────────────┐
-│ Response Merger │
-│ • updatedInput: deep-merge
-│ • additionalContext: concatenate
-│ • permissionDecision: most-restrictive
-└──────────────────────┘
- ↓
-Single JSON → Claude Code
-```
-
----
-
-## Testing
-
-```bash
-# Run all 15 tests
-node scripts/hooks/__tests__/hooks.test.js
-
-# Test individual hook with mock input
-node scripts/hooks/__tests__/hook-test-harness.js pre-tool-use.js \
- '{"tool_name":"Task","tool_input":{"subagent_type":"Plan","prompt":"test"}}'
-
-# Syntax check all hooks
-for f in scripts/hooks/*.js; do node --check "$f" && echo "OK: $f"; done
-```
-
-## Configuration
-
-### Routing Config (`~/.memesh/routing-config.json`)
-
-| Field | Description |
-|-------|-------------|
-| `modelRouting.rules` | Subagent → model mapping |
-| `backgroundRules` | Subagent → force background |
-| `planningEnforcement.enabled` | Inject SDD+BDD template |
-| `dryRunGate.enabled` | Warn on untested code |
-| `auditLog` | Log routing decisions |
-
-### Thresholds (`hook-utils.js`)
-
-```javascript
-THRESHOLDS = {
- TOKEN_SAVE: 250_000,
- RETENTION_DAYS: 30,
- MAX_ARCHIVED_SESSIONS: 30
-}
-```
-
----
-
-Part of MeMesh project. License: MIT.
diff --git a/scripts/hooks/__tests__/hook-test-harness.js b/scripts/hooks/__tests__/hook-test-harness.js
deleted file mode 100644
index 39f42780..00000000
--- a/scripts/hooks/__tests__/hook-test-harness.js
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Hook Test Harness — Simulates Claude Code hook execution.
- *
- * Pipes mock stdin JSON to a hook script and validates the output.
- * Does NOT require Claude Code runtime.
- *
- * Usage:
- * node hook-test-harness.js
- * node hook-test-harness.js ../pre-tool-use.js '{"tool_name":"Bash","tool_input":{"command":"git commit -m test"}}'
- *
- * Or programmatically:
- * import { runHook, assertJSON, assertContains } from './hook-test-harness.js';
- */
-
-import { execFile } from 'child_process';
-import path from 'path';
-import { fileURLToPath } from 'url';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = path.dirname(__filename);
-
-// ============================================================================
-// Core Test Functions
-// ============================================================================
-
-/**
- * Run a hook script with mock stdin and capture output.
- *
- * @param {string} hookPath - Path to hook script (relative to hooks/ dir or absolute)
- * @param {Object|string} stdinData - JSON data to pipe as stdin
- * @param {Object} options - Options
- * @param {number} options.timeout - Timeout in ms (default: 10000)
- * @param {Object} options.env - Additional environment variables
- * @returns {Promise<{ stdout: string, stderr: string, exitCode: number, parsed: Object|null }>}
- */
-export function runHook(hookPath, stdinData, options = {}) {
- const { timeout = 10000, env = {} } = options;
-
- // Resolve hook path relative to hooks directory
- const resolvedPath = path.isAbsolute(hookPath)
- ? hookPath
- : path.resolve(__dirname, '..', hookPath);
-
- const stdinStr = typeof stdinData === 'string'
- ? stdinData
- : JSON.stringify(stdinData);
-
- return new Promise((resolve) => {
- const child = execFile('node', [resolvedPath], {
- encoding: 'utf-8',
- timeout,
- env: { ...process.env, ...env },
- }, (error, stdout, stderr) => {
- let parsed = null;
- try {
- if (stdout.trim()) {
- parsed = JSON.parse(stdout.trim());
- }
- } catch {
- // Not JSON output — that's fine for some hooks
- }
-
- resolve({
- stdout: stdout || '',
- stderr: stderr || '',
- exitCode: error ? (error.code || 1) : 0,
- parsed,
- });
- });
-
- // Pipe stdin
- if (child.stdin) {
- child.stdin.write(stdinStr);
- child.stdin.end();
- }
- });
-}
-
-// ============================================================================
-// Assertion Helpers
-// ============================================================================
-
-/**
- * Assert that the hook output is valid JSON with expected structure.
- * @param {Object} result - Result from runHook()
- * @param {string} hookEventName - Expected hookEventName
- * @returns {boolean}
- */
-export function assertHookResponse(result, hookEventName) {
- if (!result.parsed) {
- console.error(` FAIL: No JSON output`);
- return false;
- }
-
- const output = result.parsed.hookSpecificOutput;
- if (!output) {
- console.error(` FAIL: Missing hookSpecificOutput`);
- return false;
- }
-
- if (output.hookEventName !== hookEventName) {
- console.error(` FAIL: hookEventName is "${output.hookEventName}", expected "${hookEventName}"`);
- return false;
- }
-
- return true;
-}
-
-/**
- * Assert that stdout contains a substring.
- * @param {Object} result - Result from runHook()
- * @param {string} substring - Expected substring
- * @returns {boolean}
- */
-export function assertContains(result, substring) {
- const fullOutput = result.stdout + result.stderr;
- if (!fullOutput.includes(substring)) {
- console.error(` FAIL: Output does not contain "${substring}"`);
- return false;
- }
- return true;
-}
-
-/**
- * Assert hook exited silently (no stdout, exit 0).
- * @param {Object} result - Result from runHook()
- * @returns {boolean}
- */
-export function assertSilent(result) {
- if (result.stdout.trim() !== '') {
- console.error(` FAIL: Expected silent exit, got stdout: ${result.stdout.substring(0, 100)}`);
- return false;
- }
- return true;
-}
-
-// ============================================================================
-// Test Runner
-// ============================================================================
-
-/**
- * Simple test runner for hook tests.
- * @param {string} suiteName - Test suite name
- * @param {Array<{name: string, fn: Function}>} tests - Test cases
- */
-export async function runTests(suiteName, tests) {
- console.log(`\n ${suiteName}`);
- console.log(' ' + '─'.repeat(50));
-
- let passed = 0;
- let failed = 0;
-
- for (const test of tests) {
- try {
- const result = await test.fn();
- if (result !== false) {
- console.log(` ✅ ${test.name}`);
- passed++;
- } else {
- console.log(` ❌ ${test.name}`);
- failed++;
- }
- } catch (error) {
- console.log(` ❌ ${test.name}`);
- console.error(` Error: ${error.message}`);
- failed++;
- }
- }
-
- console.log(' ' + '─'.repeat(50));
- console.log(` Results: ${passed} passed, ${failed} failed\n`);
-
- return { passed, failed };
-}
-
-// ============================================================================
-// CLI Mode
-// ============================================================================
-
-async function main() {
- const args = process.argv.slice(2);
-
- if (args.length < 2) {
- console.log('Usage: node hook-test-harness.js ');
- console.log('');
- console.log('Examples:');
- console.log(' node hook-test-harness.js pre-tool-use.js \'{"tool_name":"Bash","tool_input":{"command":"git commit -m test"}}\'');
- console.log(' node hook-test-harness.js post-tool-use.js \'{"tool_name":"Read","tool_input":{"file_path":"/tmp/test.js"}}\'');
- process.exit(0);
- }
-
- const [hookScript, stdinJSON] = args;
-
- console.log(`\nRunning: ${hookScript}`);
- console.log(`Stdin: ${stdinJSON.substring(0, 100)}...`);
- console.log('');
-
- const result = await runHook(hookScript, stdinJSON);
-
- console.log(`Exit code: ${result.exitCode}`);
- if (result.stdout.trim()) {
- console.log(`Stdout: ${result.stdout.trim()}`);
- }
- if (result.stderr.trim()) {
- console.log(`Stderr: ${result.stderr.trim()}`);
- }
- if (result.parsed) {
- console.log(`Parsed JSON:`);
- console.log(JSON.stringify(result.parsed, null, 2));
- }
-}
-
-// Run CLI mode if invoked directly
-if (process.argv[1] && process.argv[1].includes('hook-test-harness')) {
- main().catch(console.error);
-}
diff --git a/scripts/hooks/__tests__/hooks.test.js b/scripts/hooks/__tests__/hooks.test.js
deleted file mode 100644
index 9740a82f..00000000
--- a/scripts/hooks/__tests__/hooks.test.js
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Hook Tests — Validates hook behavior without Claude Code runtime.
- *
- * Run: node scripts/hooks/__tests__/hooks.test.js
- */
-
-import { runHook, assertHookResponse, assertSilent, runTests } from './hook-test-harness.js';
-
-// ============================================================================
-// PreToolUse Tests
-// ============================================================================
-
-const preToolUseTests = [
- {
- name: 'Git commit without review triggers reminder or silent exit',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Bash',
- tool_input: { command: 'git commit -m "test commit"' },
- });
- // Should produce JSON output with review reminder
- // OR exit silently if codeReviewDone=true from prior test
- if (result.parsed) {
- return assertHookResponse(result, 'PreToolUse');
- }
- // Silent exit is acceptable (review already done or no session file)
- return assertSilent(result);
- },
- },
- {
- name: 'Non-git-commit Bash exits silently',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Bash',
- tool_input: { command: 'ls -la' },
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Non-Bash tool exits silently',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Read',
- tool_input: { file_path: '/tmp/test.js' },
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Git amend exits silently',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Bash',
- tool_input: { command: 'git commit --amend -m "fix"' },
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Empty stdin exits without error',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', '');
- return result.exitCode === 0;
- },
- },
-];
-
-// ============================================================================
-// PreToolUse — Smart Router Tests (1B)
-// ============================================================================
-
-const smartRouterTests = [
- {
- name: 'Task(Explore) gets model routing or silent exit',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Task',
- tool_input: { subagent_type: 'Explore', prompt: 'find auth code' },
- });
- // Either: hook produces routing output with haiku, or exits silently (no config)
- if (!result.parsed) {
- return assertSilent(result); // No output = no config, valid
- }
- const output = result.parsed?.hookSpecificOutput;
- if (!output) return false; // Parsed but no hookSpecificOutput = malformed
- // If routing was applied, model should be haiku
- if (output.updatedInput?.model) {
- return output.updatedInput.model === 'haiku';
- }
- // Output present but no model routing = other handler fired, OK
- return true;
- },
- },
- {
- name: 'Task with explicit model preserves user choice',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Task',
- tool_input: { subagent_type: 'Explore', model: 'opus', prompt: 'deep analysis' },
- });
- // Should NOT override user's explicit model
- if (result.parsed?.hookSpecificOutput?.updatedInput?.model) {
- // Any model override when user specified 'opus' is wrong
- return false;
- }
- // No model override = correct behavior
- return true;
- },
- },
- {
- name: 'Task(Plan) gets planning template injected',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Task',
- tool_input: { subagent_type: 'Plan', prompt: 'plan the auth refactor' },
- });
- if (!result.parsed) {
- // No output = template file not found. Acceptable but log it.
- return assertSilent(result);
- }
- const output = result.parsed?.hookSpecificOutput;
- if (!output) return false; // Parsed but no hookSpecificOutput = malformed
- // Prompt should contain original + template content
- if (output.updatedInput?.prompt) {
- const prompt = output.updatedInput.prompt;
- // Must contain original prompt AND some template content
- return prompt.includes('plan the auth refactor') &&
- (prompt.includes('Required Plan Sections') || prompt.includes('---'));
- }
- // No prompt modification = handler didn't fire, unexpected
- return false;
- },
- },
- {
- name: 'EnterPlanMode gets context with template reference',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'EnterPlanMode',
- tool_input: {},
- });
- if (!result.parsed) {
- // No output = template file not found. Acceptable.
- return assertSilent(result);
- }
- const output = result.parsed?.hookSpecificOutput;
- if (!output) return false; // Parsed but no hookSpecificOutput = malformed
- if (output.additionalContext) {
- return output.additionalContext.includes('PLANNING MODE');
- }
- // hookSpecificOutput without additionalContext = wrong handler response
- return false;
- },
- },
- {
- name: 'Non-Task tool is not affected by routing',
- fn: async () => {
- const result = await runHook('pre-tool-use.js', {
- tool_name: 'Grep',
- tool_input: { pattern: 'test', path: '/tmp' },
- });
- return assertSilent(result);
- },
- },
-];
-
-// ============================================================================
-// PostToolUse Tests
-// ============================================================================
-
-const postToolUseTests = [
- {
- name: 'Read tool exits silently',
- fn: async () => {
- const result = await runHook('post-tool-use.js', {
- tool_name: 'Read',
- tool_input: { file_path: '/tmp/test.js' },
- success: true,
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Bash tool exits silently',
- fn: async () => {
- const result = await runHook('post-tool-use.js', {
- tool_name: 'Bash',
- tool_input: { command: 'echo hello' },
- success: true,
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Empty stdin exits without error',
- fn: async () => {
- const result = await runHook('post-tool-use.js', '');
- return result.exitCode === 0;
- },
- },
-];
-
-// ============================================================================
-// PostCommit Tests
-// ============================================================================
-
-const postCommitTests = [
- {
- name: 'Non-git-commit exits silently',
- fn: async () => {
- const result = await runHook('post-commit.js', {
- tool_name: 'Read',
- tool_input: { file_path: '/tmp/test.js' },
- success: true,
- });
- return assertSilent(result);
- },
- },
- {
- name: 'Failed command exits silently',
- fn: async () => {
- const result = await runHook('post-commit.js', {
- tool_name: 'Bash',
- tool_input: { command: 'git commit -m "test"' },
- success: false,
- });
- return assertSilent(result);
- },
- },
-];
-
-// ============================================================================
-// Run All Tests
-// ============================================================================
-
-async function main() {
- console.log('\n🧪 Hook Test Suite\n');
-
- let totalPassed = 0;
- let totalFailed = 0;
-
- const suites = [
- { name: 'PreToolUse Hook (Code Review)', tests: preToolUseTests },
- { name: 'PreToolUse Hook (Smart Router)', tests: smartRouterTests },
- { name: 'PostToolUse Hook', tests: postToolUseTests },
- { name: 'PostCommit Hook', tests: postCommitTests },
- ];
-
- for (const suite of suites) {
- const { passed, failed } = await runTests(suite.name, suite.tests);
- totalPassed += passed;
- totalFailed += failed;
- }
-
- console.log('═'.repeat(55));
- console.log(` Total: ${totalPassed} passed, ${totalFailed} failed`);
- console.log('═'.repeat(55));
-
- process.exit(totalFailed > 0 ? 1 : 0);
-}
-
-main().catch(error => {
- console.error('Test runner error:', error);
- process.exit(1);
-});
diff --git a/scripts/hooks/__tests__/post-tool-use-recall.test.js b/scripts/hooks/__tests__/post-tool-use-recall.test.js
deleted file mode 100644
index 15d1c5e1..00000000
--- a/scripts/hooks/__tests__/post-tool-use-recall.test.js
+++ /dev/null
@@ -1,192 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import {
- isTestCommand,
- extractTestFailureContext,
- buildTestFailureQuery,
- buildErrorQuery,
-} from '../post-tool-use-recall-utils.js';
-
-describe('isTestCommand', () => {
- it('detects "npm test"', () => {
- expect(isTestCommand('npm test')).toBe(true);
- });
-
- it('detects "npm run test"', () => {
- expect(isTestCommand('npm run test')).toBe(true);
- });
-
- it('detects "npx vitest"', () => {
- expect(isTestCommand('npx vitest')).toBe(true);
- });
-
- it('detects "vitest run"', () => {
- expect(isTestCommand('vitest run')).toBe(true);
- });
-
- it('detects "vitest" alone', () => {
- expect(isTestCommand('vitest')).toBe(true);
- });
-
- it('detects "npx jest"', () => {
- expect(isTestCommand('npx jest')).toBe(true);
- });
-
- it('detects "jest" alone', () => {
- expect(isTestCommand('jest')).toBe(true);
- });
-
- it('detects "pytest"', () => {
- expect(isTestCommand('pytest')).toBe(true);
- });
-
- it('detects "bun test"', () => {
- expect(isTestCommand('bun test')).toBe(true);
- });
-
- it('detects "mocha"', () => {
- expect(isTestCommand('mocha')).toBe(true);
- });
-
- it('rejects "ls -la"', () => {
- expect(isTestCommand('ls -la')).toBe(false);
- });
-
- it('rejects "git commit"', () => {
- expect(isTestCommand('git commit -m "test"')).toBe(false);
- });
-
- it('rejects "echo test"', () => {
- expect(isTestCommand('echo test')).toBe(false);
- });
-
- it('rejects empty string', () => {
- expect(isTestCommand('')).toBe(false);
- });
-
- it('rejects null/undefined', () => {
- expect(isTestCommand(null)).toBe(false);
- expect(isTestCommand(undefined)).toBe(false);
- });
-
- it('is case-insensitive', () => {
- expect(isTestCommand('NPM TEST')).toBe(true);
- expect(isTestCommand('Vitest Run')).toBe(true);
- });
-});
-
-describe('extractTestFailureContext', () => {
- it('returns null for null/undefined/empty input', () => {
- expect(extractTestFailureContext(null)).toBeNull();
- expect(extractTestFailureContext(undefined)).toBeNull();
- expect(extractTestFailureContext('')).toBeNull();
- });
-
- it('returns null for passing test output', () => {
- const output = `
- ✓ src/utils/helper.test.ts (3 tests) 12ms
- Test Files 1 passed (1)
- Tests 3 passed (3)
-`;
- expect(extractTestFailureContext(output)).toBeNull();
- });
-
- it('extracts from vitest FAIL output', () => {
- const output = `
- FAIL src/mcp/handlers/HookToolHandler.test.ts
- ✕ should process tool use (5ms)
- Error: expect(received).toBe(expected)
-`;
- const ctx = extractTestFailureContext(output);
- expect(ctx).not.toBeNull();
- expect(ctx.testName).toBe('src/mcp/handlers/HookToolHandler.test.ts');
- expect(ctx.errorMessage).toMatch(/Error/);
- });
-
- it('extracts from jest-style FAIL output', () => {
- const output = `
-FAIL src/auth/login.test.js
- ● should validate credentials
- expect(received).toBe(expected)
-`;
- const ctx = extractTestFailureContext(output);
- expect(ctx).not.toBeNull();
- expect(ctx.testName).toBe('src/auth/login.test.js');
- });
-
- it('extracts error message from output with "failed"', () => {
- const output = `
-Tests: 2 failed, 5 passed
-Error: Connection refused
-`;
- const ctx = extractTestFailureContext(output);
- expect(ctx).not.toBeNull();
- expect(ctx.errorMessage).toMatch(/Error.*Connection refused/);
- });
-
- it('handles output with ✕ marker', () => {
- const output = `
- ✕ my test case
-`;
- const ctx = extractTestFailureContext(output);
- expect(ctx).not.toBeNull();
- });
-});
-
-describe('buildTestFailureQuery', () => {
- it('combines short test name + error message', () => {
- const result = buildTestFailureQuery('src/utils/helper.test.ts', 'Connection refused');
- expect(result).toBe('helper Connection refused');
- });
-
- it('strips directory path and test/spec suffix', () => {
- const result = buildTestFailureQuery('src/mcp/handlers/HookToolHandler.test.ts', 'error');
- expect(result).toBe('HookToolHandler error');
- });
-
- it('strips spec suffix', () => {
- const result = buildTestFailureQuery('auth.spec.js', 'fail');
- expect(result).toBe('auth fail');
- });
-
- it('handles unknown test name', () => {
- const result = buildTestFailureQuery('unknown test', 'some error');
- expect(result).toBe('unknown test some error');
- });
-
- it('handles empty error message', () => {
- const result = buildTestFailureQuery('src/foo.test.ts', '');
- expect(result).toBe('foo');
- });
-});
-
-describe('buildErrorQuery', () => {
- it('combines error type + first line of message', () => {
- const result = buildErrorQuery('TypeError', 'Cannot read property x of undefined');
- expect(result).toBe('TypeError Cannot read property x of undefined');
- });
-
- it('uses only first line of multiline message', () => {
- const result = buildErrorQuery('ReferenceError', 'x is not defined\n at foo.js:10\n at bar.js:20');
- expect(result).toBe('ReferenceError x is not defined');
- });
-
- it('handles null/undefined error type', () => {
- const result = buildErrorQuery(null, 'something broke');
- expect(result).toBe('Error something broke');
- });
-
- it('handles undefined error type', () => {
- const result = buildErrorQuery(undefined, 'something broke');
- expect(result).toBe('Error something broke');
- });
-
- it('handles null/undefined error message', () => {
- const result = buildErrorQuery('SyntaxError', null);
- expect(result).toBe('SyntaxError');
- });
-
- it('handles empty inputs', () => {
- const result = buildErrorQuery('', '');
- expect(result).toBe('Error');
- });
-});
diff --git a/scripts/hooks/__tests__/session-start-recall.test.js b/scripts/hooks/__tests__/session-start-recall.test.js
deleted file mode 100644
index faee907b..00000000
--- a/scripts/hooks/__tests__/session-start-recall.test.js
+++ /dev/null
@@ -1,86 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import { buildSessionRecallQuery, formatRecallOutput } from '../session-start-recall-utils.js';
-
-describe('buildSessionRecallQuery', () => {
- it('combines project name and commits', () => {
- const result = buildSessionRecallQuery('my-project', ['add login page', 'fix header bug']);
- expect(result).toBe('my-project add login page fix header bug');
- });
-
- it('handles empty commits array', () => {
- const result = buildSessionRecallQuery('my-project', []);
- expect(result).toBe('my-project');
- });
-
- it('handles undefined commits', () => {
- const result = buildSessionRecallQuery('my-project');
- expect(result).toBe('my-project');
- });
-
- it('strips conventional commit prefixes', () => {
- const commits = [
- 'fix: resolve null pointer',
- 'feat(auth): add OAuth support',
- 'chore(deps): update dependencies',
- 'refactor: simplify logic',
- ];
- const result = buildSessionRecallQuery('app', commits);
- expect(result).toBe('app resolve null pointer add OAuth support update dependencies simplify logic');
- });
-
- it('filters out empty strings after stripping prefixes', () => {
- const commits = ['fix:', 'feat(scope): '];
- const result = buildSessionRecallQuery('app', commits);
- expect(result).toBe('app');
- });
-});
-
-describe('formatRecallOutput', () => {
- it('formats results with similarity percentage', () => {
- const results = [
- { name: 'Entity1', observations: ['obs1', 'obs2'], similarity: 0.85 },
- ];
- const output = formatRecallOutput(results);
- expect(output).toBe(' - Entity1 (85%): obs1; obs2');
- });
-
- it('returns empty string for empty results', () => {
- expect(formatRecallOutput([])).toBe('');
- });
-
- it('returns empty string for null results', () => {
- expect(formatRecallOutput(null)).toBe('');
- });
-
- it('returns empty string for undefined results', () => {
- expect(formatRecallOutput(undefined)).toBe('');
- });
-
- it('limits observations to 2', () => {
- const results = [
- { name: 'Entity1', observations: ['obs1', 'obs2', 'obs3', 'obs4'], similarity: 0.7 },
- ];
- const output = formatRecallOutput(results);
- expect(output).toBe(' - Entity1 (70%): obs1; obs2');
- });
-
- it('formats multiple results on separate lines', () => {
- const results = [
- { name: 'A', observations: ['a1'], similarity: 0.9 },
- { name: 'B', observations: ['b1', 'b2'], similarity: 0.6 },
- ];
- const output = formatRecallOutput(results);
- const lines = output.split('\n');
- expect(lines).toHaveLength(2);
- expect(lines[0]).toBe(' - A (90%): a1');
- expect(lines[1]).toBe(' - B (60%): b1; b2');
- });
-
- it('rounds similarity percentage', () => {
- const results = [
- { name: 'X', observations: ['x1'], similarity: 0.456 },
- ];
- const output = formatRecallOutput(results);
- expect(output).toBe(' - X (46%): x1');
- });
-});
diff --git a/scripts/hooks/hook-utils.js b/scripts/hooks/hook-utils.js
deleted file mode 100644
index c3a18034..00000000
--- a/scripts/hooks/hook-utils.js
+++ /dev/null
@@ -1,899 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Hook Utilities - Shared functions for Claude Code hooks
- *
- * This module provides common utilities used across all hooks:
- * - File I/O (JSON read/write)
- * - SQLite queries with SQL injection protection
- * - Path constants
- * - Time utilities
- *
- * All hooks should import from this module to avoid code duplication.
- */
-
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-import { execFileSync } from 'child_process';
-
-// ============================================================================
-// Constants
-// ============================================================================
-
-/** Home directory with fallback */
-export const HOME_DIR = process.env.HOME || os.homedir();
-
-/** State directory for hook data */
-export const STATE_DIR = path.join(HOME_DIR, '.claude', 'state');
-
-/** MeMesh knowledge graph database path (mirrors PathResolver logic from src/utils/PathResolver.ts) */
-function resolveMemeshDbPath() {
- const primaryDir = path.join(HOME_DIR, '.memesh');
- const legacyDir = path.join(HOME_DIR, '.claude-code-buddy');
-
- if (fs.existsSync(path.join(primaryDir, 'knowledge-graph.db'))) {
- return path.join(primaryDir, 'knowledge-graph.db');
- }
- if (fs.existsSync(path.join(legacyDir, 'knowledge-graph.db'))) {
- return path.join(legacyDir, 'knowledge-graph.db');
- }
- return path.join(primaryDir, 'knowledge-graph.db');
-}
-
-export const MEMESH_DB_PATH = resolveMemeshDbPath();
-
-/** Hook error log file */
-export const ERROR_LOG_PATH = path.join(STATE_DIR, 'hook-errors.log');
-
-/** Memory saves log file */
-export const MEMORY_LOG_PATH = path.join(STATE_DIR, 'memory-saves.log');
-
-// Time constants (in milliseconds)
-export const TIME = {
- SECOND: 1000,
- MINUTE: 60 * 1000,
- HOUR: 60 * 60 * 1000,
- DAY: 24 * 60 * 60 * 1000,
-};
-
-// Threshold constants
-export const THRESHOLDS = {
- /** Token threshold for auto-saving key points */
- TOKEN_SAVE: 250_000,
- /** Days to retain session key points */
- RETENTION_DAYS: 30,
- /** Days to recall key points on session start */
- RECALL_DAYS: 30,
- /** Slow execution threshold (ms) */
- SLOW_EXECUTION: 5000,
- /** High token usage threshold */
- HIGH_TOKENS: 10_000,
- /** Quota warning percentage */
- QUOTA_WARNING: 0.8,
- /** Heartbeat validity duration (ms) */
- HEARTBEAT_VALIDITY: 5 * 60 * 1000,
- /** Maximum number of archived sessions to keep */
- MAX_ARCHIVED_SESSIONS: 30,
-};
-
-// ============================================================================
-// File I/O Utilities
-// ============================================================================
-
-/**
- * Ensure a directory exists, creating it if necessary
- * @param {string} dirPath - Directory path to ensure exists
- */
-export function ensureDir(dirPath) {
- if (!fs.existsSync(dirPath)) {
- fs.mkdirSync(dirPath, { recursive: true });
- }
-}
-
-/**
- * Read JSON file with error handling
- * @param {string} filePath - Path to JSON file
- * @param {*} defaultValue - Default value if file doesn't exist or is invalid
- * @returns {*} Parsed JSON or default value
- */
-export function readJSONFile(filePath, defaultValue = {}) {
- try {
- if (fs.existsSync(filePath)) {
- const content = fs.readFileSync(filePath, 'utf-8');
- return JSON.parse(content);
- }
- } catch (error) {
- logError(`Read error ${path.basename(filePath)}`, error);
- }
- return defaultValue;
-}
-
-/**
- * Write JSON file with error handling
- * @param {string} filePath - Path to JSON file
- * @param {*} data - Data to write
- * @returns {boolean} True if successful
- */
-export function writeJSONFile(filePath, data) {
- try {
- fs.writeFileSync(filePath, JSON.stringify(data, null, 2), 'utf-8');
- return true;
- } catch (error) {
- logError(`Write error ${path.basename(filePath)}`, error);
- return false;
- }
-}
-
-// ============================================================================
-// Logging Utilities
-// ============================================================================
-
-/**
- * Log error to error log file (silent - no console output)
- * @param {string} context - Error context description
- * @param {Error|string} error - Error object or message
- */
-export function logError(context, error) {
- const message = error instanceof Error ? error.message : String(error);
- const timestamp = new Date().toISOString();
- const logLine = `[${timestamp}] ${context}: ${message}\n`;
-
- try {
- ensureDir(STATE_DIR);
- fs.appendFileSync(ERROR_LOG_PATH, logLine);
- } catch (logErr) {
- process.stderr.write(`[logError FAILED] ${context}: ${message} (log error: ${logErr.message})\n`);
- }
-}
-
-/**
- * Log memory save event
- * @param {string} message - Log message
- */
-export function logMemorySave(message) {
- const timestamp = new Date().toISOString();
- const logLine = `[${timestamp}] ${message}\n`;
-
- try {
- ensureDir(STATE_DIR);
- fs.appendFileSync(MEMORY_LOG_PATH, logLine);
- } catch (logErr) {
- process.stderr.write(`[logMemorySave FAILED] ${message} (log error: ${logErr.message})\n`);
- }
-}
-
-// ============================================================================
-// SQLite Utilities (SQL Injection Safe)
-// ============================================================================
-
-/**
- * Escape a value for safe SQL string interpolation.
- * Numbers are returned unquoted; strings are quoted with single-quote escaping.
- * @param {*} value - Value to escape
- * @returns {string} Escaped SQL literal
- */
-export function escapeSQL(value) {
- if (value === null || value === undefined) {
- return 'NULL';
- }
- // Numbers don't need quoting in SQL
- if (typeof value === 'number' && Number.isFinite(value)) {
- return String(value);
- }
- // Booleans as integers
- if (typeof value === 'boolean') {
- return value ? '1' : '0';
- }
- // Everything else: coerce to string and escape single quotes
- return `'${String(value).replace(/'/g, "''")}'`;
-}
-
-/**
- * Execute SQLite query with parameterized values (SQL injection safe)
- *
- * Uses placeholder replacement for safe parameter binding.
- * Parameters are escaped by doubling single quotes.
- *
- * @param {string} dbPath - Path to SQLite database
- * @param {string} query - SQL query with ? placeholders
- * @param {Array} params - Parameter values to substitute
- * @param {Object} options - Query options
- * @param {number} options.timeout - Timeout in ms (default: 5000)
- * @param {boolean} options.json - Use JSON output mode (default: false)
- * @returns {string|null} Query result as string, or null on error
- *
- * @example
- * // Basic query
- * sqliteQuery(dbPath, 'SELECT * FROM users WHERE id = ?', [userId]);
- *
- * // JSON output mode
- * sqliteQuery(dbPath, 'SELECT * FROM users', [], { json: true });
- */
-export function sqliteQuery(dbPath, query, params = [], options = {}) {
- const { timeout = 5000, json = false } = options;
-
- try {
- let finalQuery = query;
-
- // Replace ? placeholders with escaped values
- if (params.length > 0) {
- let paramIndex = 0;
- finalQuery = query.replace(/\?/g, () => {
- if (paramIndex < params.length) {
- return escapeSQL(params[paramIndex++]);
- }
- return '?';
- });
- }
-
- const args = json ? ['-json', dbPath, finalQuery] : [dbPath, finalQuery];
-
- const result = execFileSync('sqlite3', args, {
- encoding: 'utf-8',
- timeout,
- });
- return result.trim();
- } catch (error) {
- logError('sqliteQuery', error);
- return null;
- }
-}
-
-/**
- * Execute SQLite query and parse JSON result
- *
- * @param {string} dbPath - Path to SQLite database
- * @param {string} query - SQL query with ? placeholders
- * @param {Array} params - Parameter values to substitute
- * @param {Object} options - Query options
- * @returns {Array|null} Parsed JSON array, empty array for no rows, or null on error
- */
-export function sqliteQueryJSON(dbPath, query, params = [], options = {}) {
- const result = sqliteQuery(dbPath, query, params, { ...options, json: true });
-
- // sqliteQuery returns null on error — propagate to caller
- if (result === null) {
- return null;
- }
-
- // Empty string means no matching rows
- if (result === '') {
- return [];
- }
-
- try {
- return JSON.parse(result);
- } catch (error) {
- logError('sqliteQueryJSON parse', error);
- return null;
- }
-}
-
-// ============================================================================
-// Time Utilities
-// ============================================================================
-
-/**
- * Get human-readable time ago string
- * @param {Date} date - Date to compare
- * @returns {string} Human-readable time difference
- */
-export function getTimeAgo(date) {
- const now = new Date();
- const diffMs = now - date;
- const diffMins = Math.floor(diffMs / TIME.MINUTE);
- const diffHours = Math.floor(diffMs / TIME.HOUR);
- const diffDays = Math.floor(diffMs / TIME.DAY);
-
- if (diffMins < 1) return 'Just now';
- if (diffMins < 60) return `${diffMins} minutes ago`;
- if (diffHours < 24) return `${diffHours} hours ago`;
- if (diffDays === 1) return 'Yesterday';
- if (diffDays < 7) return `${diffDays} days ago`;
- return date.toLocaleDateString();
-}
-
-/**
- * Calculate duration string from start time
- * @param {string} startTime - ISO timestamp string
- * @returns {string} Duration string (e.g., "5m 30s")
- */
-export function calculateDuration(startTime) {
- const start = new Date(startTime);
- const end = new Date();
- const durationMs = end - start;
- const minutes = Math.floor(durationMs / TIME.MINUTE);
- const seconds = Math.floor((durationMs % TIME.MINUTE) / TIME.SECOND);
- return minutes > 0 ? `${minutes}m ${seconds}s` : `${seconds}s`;
-}
-
-/**
- * Get ISO date string (YYYY-MM-DD)
- * @param {Date} date - Date object (default: now)
- * @returns {string} Date string
- */
-export function getDateString(date = new Date()) {
- return date.toISOString().split('T')[0];
-}
-
-// ============================================================================
-// Stdin Utilities
-// ============================================================================
-
-/**
- * Read stdin with timeout protection
- * Properly removes event listeners to prevent memory leaks
- * @param {number} timeout - Timeout in milliseconds
- * @returns {Promise} Stdin content
- */
-export function readStdin(timeout = 3000) {
- return new Promise((resolve, reject) => {
- // Fast path: stdin already closed/ended — avoids 3s timeout hang
- if (process.stdin.readableEnded || process.stdin.destroyed) {
- return resolve('');
- }
-
- let data = '';
-
- const cleanup = () => {
- process.stdin.removeListener('data', onData);
- process.stdin.removeListener('end', onEnd);
- process.stdin.removeListener('error', onError);
- };
-
- const timer = setTimeout(() => {
- cleanup();
- reject(new Error('Stdin read timeout'));
- }, timeout);
-
- const onData = (chunk) => {
- data += chunk;
- };
-
- const onEnd = () => {
- clearTimeout(timer);
- cleanup();
- resolve(data);
- };
-
- const onError = (err) => {
- clearTimeout(timer);
- cleanup();
- reject(err);
- };
-
- process.stdin.on('data', onData);
- process.stdin.on('end', onEnd);
- process.stdin.on('error', onError);
- });
-}
-
-// ============================================================================
-// Batch SQLite Operations
-// ============================================================================
-
-/**
- * Execute multiple SQLite statements in a single process spawn.
- * Wraps all statements in BEGIN/COMMIT for atomicity.
- *
- * Performance: 1 process spawn instead of N, saving ~100ms per avoided spawn.
- *
- * @param {string} dbPath - Path to SQLite database
- * @param {Array<{query: string, params?: Array}>} statements - SQL statements
- * @param {Object} options - Options
- * @param {number} options.timeout - Timeout in ms (default: 10000)
- * @param {number} options.chunkSize - Max statements per batch (default: 50)
- * @returns {string|null} Combined output, or null on error
- */
-export function sqliteBatch(dbPath, statements, options = {}) {
- const { timeout = 10000, chunkSize = 50 } = options;
-
- if (!statements || statements.length === 0) return '';
-
- try {
- const chunks = [];
- for (let i = 0; i < statements.length; i += chunkSize) {
- chunks.push(statements.slice(i, i + chunkSize));
- }
-
- let output = '';
- for (const chunk of chunks) {
- const batchSQL = ['BEGIN TRANSACTION;'];
-
- for (const stmt of chunk) {
- let finalQuery = stmt.query;
- if (stmt.params && stmt.params.length > 0) {
- let paramIndex = 0;
- finalQuery = stmt.query.replace(/\?/g, () => {
- if (paramIndex < stmt.params.length) {
- return escapeSQL(stmt.params[paramIndex++]);
- }
- return '?';
- });
- }
- if (!finalQuery.trim().endsWith(';')) {
- finalQuery += ';';
- }
- batchSQL.push(finalQuery);
- }
-
- batchSQL.push('COMMIT;');
-
- // Pipe SQL via stdin to avoid E2BIG on large batches
- const result = execFileSync('sqlite3', [dbPath], {
- encoding: 'utf-8',
- timeout,
- input: batchSQL.join('\n'),
- });
- if (result.trim()) {
- output += result.trim() + '\n';
- }
- }
-
- return output.trim();
- } catch (error) {
- logError('sqliteBatch', error);
- return null;
- }
-}
-
-/**
- * Insert entity + observations + tags in minimal spawns.
- * Common pattern used by post-commit and stop hooks.
- *
- * Uses a three-step approach:
- * 1. INSERT entity (1 spawn)
- * 2. SELECT entity ID (1 spawn)
- * 3. Batch INSERT all observations + tags (1 spawn)
- *
- * Total: 3 spawns (was N+2 before batching).
- *
- * @param {string} dbPath - Path to SQLite database
- * @param {Object} entity - Entity to insert
- * @param {string} entity.name - Entity name (must be unique)
- * @param {string} entity.type - Entity type
- * @param {string} entity.metadata - JSON metadata string
- * @param {Array} observations - Observation content strings
- * @param {Array} tags - Tag strings
- * @returns {number|null} Entity ID, or null on failure
- */
-export function sqliteBatchEntity(dbPath, entity, observations = [], tags = []) {
- try {
- const now = new Date().toISOString();
-
- // Step 1: Insert entity (need the ID for subsequent inserts)
- const insertResult = sqliteQuery(
- dbPath,
- 'INSERT INTO entities (name, type, created_at, metadata) VALUES (?, ?, ?, ?)',
- [entity.name, entity.type, now, entity.metadata || '{}']
- );
- if (insertResult === null) return null;
-
- const entityIdResult = sqliteQuery(
- dbPath,
- 'SELECT id FROM entities WHERE name = ?',
- [entity.name]
- );
- if (entityIdResult === null) return null;
- const entityId = parseInt(entityIdResult, 10);
- if (isNaN(entityId)) return null;
-
- // Step 2: Batch all observations and tags in one spawn
- const statements = [];
-
- for (const obs of observations) {
- statements.push({
- query: 'INSERT INTO observations (entity_id, content, created_at) VALUES (?, ?, ?)',
- params: [entityId, obs, now],
- });
- }
-
- for (const tag of tags) {
- statements.push({
- query: 'INSERT INTO tags (entity_id, tag) VALUES (?, ?)',
- params: [entityId, tag],
- });
- }
-
- if (statements.length > 0) {
- const batchResult = sqliteBatch(dbPath, statements);
- if (batchResult === null) {
- // Clean up orphaned entity — batch failed so observations/tags rolled back
- logError('sqliteBatchEntity', new Error(`Batch failed for entity ${entity.name}, cleaning up orphan`));
- sqliteQuery(dbPath, 'DELETE FROM entities WHERE id = ?', [entityId]);
- return null;
- }
- }
-
- return entityId;
- } catch (error) {
- logError('sqliteBatchEntity', error);
- return null;
- }
-}
-
-// ============================================================================
-// Async File I/O
-// ============================================================================
-
-/**
- * Write JSON file asynchronously (non-blocking).
- * Returns a promise so callers can await if needed.
- * @param {string} filePath - Path to JSON file
- * @param {*} data - Data to write
- * @returns {Promise} True if successful
- */
-export function writeJSONFileAsync(filePath, data) {
- return new Promise((resolve) => {
- const content = JSON.stringify(data, null, 2);
- fs.writeFile(filePath, content, 'utf-8', (err) => {
- if (err) {
- logError(`Async write error ${path.basename(filePath)}`, err);
- }
- resolve(!err);
- });
- });
-}
-
-// Ensure state directory exists on module load
-ensureDir(STATE_DIR);
-
-// ============================================================================
-// Plan-Aware Memory Hooks (Beta)
-// ============================================================================
-
-/** File path patterns that indicate a plan file (requires docs/ context) */
-const PLAN_PATTERNS = [
- /docs\/plans\/.*\.md$/,
- /docs\/.*-design\.md$/,
- /docs\/.*-plan\.md$/,
-];
-
-/**
- * Check if a file path matches plan file patterns.
- * @param {string} filePath - File path to check
- * @returns {boolean}
- */
-export function isPlanFile(filePath) {
- if (!filePath) return false;
- return PLAN_PATTERNS.some(p => p.test(filePath));
-}
-
-/** Common English stop words to filter from tokenization */
-const STOP_WORDS = new Set([
- 'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
- 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
- 'should', 'may', 'might', 'shall', 'can', 'need', 'dare', 'ought',
- 'used', 'to', 'of', 'in', 'for', 'on', 'with', 'at', 'by', 'from',
- 'as', 'into', 'through', 'during', 'before', 'after', 'above', 'below',
- 'between', 'out', 'off', 'over', 'under', 'again', 'further', 'then',
- 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'both',
- 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
- 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 'just',
- 'because', 'but', 'and', 'or', 'if', 'while', 'that', 'this', 'these',
- 'those', 'it', 'its', 'up', 'set',
-]);
-
-/**
- * Tokenize text into lowercase meaningful words.
- * Removes punctuation, stop words, and words shorter than 3 characters.
- * @param {string} text - Input text
- * @returns {string[]} Array of meaningful words
- */
-export function tokenize(text) {
- if (!text) return [];
- return text
- .toLowerCase()
- .replace(/[^\w\s]/g, ' ')
- .split(/\s+/)
- .filter(w => w.length > 2 && !STOP_WORDS.has(w));
-}
-
-/**
- * Extract module/file hints from a step description.
- * Returns words that could match file paths or module names.
- * @param {string} description - Step description text
- * @returns {string[]} Module hint words
- */
-export function extractModuleHints(description) {
- return tokenize(description);
-}
-
-/**
- * Derive a human-readable plan name from a file path.
- * Strips date prefixes (YYYY-MM-DD-) and .md extension.
- * @param {string} filePath - File path
- * @returns {string} Plan name
- */
-export function derivePlanName(filePath) {
- let name = path.basename(filePath, '.md');
- // Remove date prefix (YYYY-MM-DD-)
- name = name.replace(/^\d{4}-\d{2}-\d{2}-/, '');
- return name;
-}
-
-/**
- * Parse plan steps from markdown content.
- * Supports checkbox format (- [ ] ...) and heading format (## Step N: ...).
- * @param {string} content - Markdown file content
- * @returns {Array<{number: number, description: string, completed: boolean}>}
- */
-export function parsePlanSteps(content) {
- if (!content) return [];
-
- const steps = [];
- const lines = content.split('\n');
- let inCodeFence = false;
-
- for (const line of lines) {
- const trimmed = line.trim();
-
- // Track code fence boundaries (``` with optional language tag)
- if (/^`{3,}/.test(trimmed)) {
- inCodeFence = !inCodeFence;
- continue;
- }
- if (inCodeFence) continue;
-
- // Format A: Checkbox "- [ ] Step N: description" or "- [ ] description"
- const checkboxMatch = trimmed.match(/^-\s+\[([ xX])\]\s+(?:(?:Step|Task)\s+\d+\s*[:.]\s*)?(.+)/);
- if (checkboxMatch) {
- steps.push({
- number: steps.length + 1,
- description: checkboxMatch[2].trim(),
- completed: checkboxMatch[1].toLowerCase() === 'x',
- });
- continue;
- }
-
- // Format B: Heading "## Step N: description" or "### Task N: description"
- const headingStepMatch = trimmed.match(/^#{2,4}\s+(?:Step|Task)\s+(\d+)\s*[:.]\s*(.+)/);
- if (headingStepMatch) {
- steps.push({
- number: parseInt(headingStepMatch[1], 10),
- description: headingStepMatch[2].trim(),
- completed: false,
- });
- continue;
- }
-
- // Format C: Numbered heading "### 1. description"
- const numberedMatch = trimmed.match(/^#{2,4}\s+(\d+)\.\s+(.+)/);
- if (numberedMatch) {
- steps.push({
- number: parseInt(numberedMatch[1], 10),
- description: numberedMatch[2].trim(),
- completed: false,
- });
- continue;
- }
- }
-
- return steps;
-}
-
-/**
- * Match a commit to the best matching uncompleted plan step.
- * Uses keyword overlap + file path hints. Threshold: 0.3.
- * @param {{ subject: string, filesChanged: string[] }} commitInfo
- * @param {Array<{ number: number, description: string, completed: boolean }>} planSteps
- * @returns {{ step: object, confidence: number } | null}
- */
-export function matchCommitToStep(commitInfo, planSteps) {
- if (!planSteps || planSteps.length === 0) return null;
- if (!commitInfo || !commitInfo.subject) return null;
-
- const commitWords = tokenize(commitInfo.subject);
- if (commitWords.length === 0) return null;
-
- let bestMatch = null;
- let bestScore = 0;
-
- for (const step of planSteps) {
- if (step.completed) continue;
-
- const stepWords = tokenize(step.description);
- if (stepWords.length === 0) continue;
-
- // Keyword overlap score (0~1)
- const overlap = commitWords.filter(w => stepWords.includes(w));
- let score = overlap.length / stepWords.length;
-
- // File path bonus (+0.3)
- const moduleHints = extractModuleHints(step.description);
- const filesChanged = commitInfo.filesChanged || [];
- const fileMatch = filesChanged.some(f =>
- moduleHints.some(hint => f.toLowerCase().includes(hint))
- );
- if (fileMatch) score += 0.3;
-
- if (score > bestScore && score > 0.3) {
- bestScore = score;
- bestMatch = step;
- }
- }
-
- if (!bestMatch) return null;
-
- // Return step + confidence (capped at 1.0)
- return { step: bestMatch, confidence: Math.min(bestScore, 1.0) };
-}
-
-/**
- * Render a full Style B timeline visualization.
- * @param {Object} plan - Plan entity with metadata.stepsDetail
- * @param {number} [highlightStep] - Step number to highlight (just matched)
- * @returns {string} Multi-line timeline string
- */
-export function renderTimeline(plan, highlightStep = null) {
- const { stepsDetail, totalSteps, completed = 0 } = plan.metadata;
- if (!stepsDetail || stepsDetail.length === 0 || !totalSteps) return '';
-
- const pct = Math.round((completed / totalSteps) * 100);
- const planName = plan.name.replace('Plan: ', '');
- const nextStep = stepsDetail.find(st => !st.completed);
-
- // Node symbols: ◉ highlighted (just-matched), ● completed, ◉ next, ○ pending
- const nodes = stepsDetail.map(s => {
- if (s.number === highlightStep) return '\u25c9';
- if (s.completed) return '\u25cf';
- if (nextStep && s.number === nextStep.number) return '\u25c9';
- return '\u25cb';
- }).join(' \u2500\u2500\u2500\u2500 ');
-
- // Step numbers row
- const numbers = stepsDetail.map(s =>
- String(s.number).padEnd(6)
- ).join('');
-
- const separator = '\u2501'.repeat(40);
-
- const lines = [
- ` \ud83d\udccb ${planName}`,
- ` ${separator}`,
- ` ${nodes}`,
- ` ${numbers} ${pct}% done`,
- ` ${separator}`,
- ];
-
- if (highlightStep) {
- const commitRef = plan._lastCommit || '';
- const confidence = plan._matchConfidence || 1.0;
- const marker = confidence < 0.6 ? ' (?)' : '';
- lines.push(` \u2705 Step ${highlightStep} matched${marker} \u2190 ${commitRef}`);
- }
-
- if (nextStep && completed < totalSteps) {
- lines.push(` \u25b6 Next: Step ${nextStep.number} - ${nextStep.description}`);
- }
-
- if (completed === totalSteps) {
- lines.push(` \ud83c\udf89 Plan complete!`);
- }
-
- return lines.join('\n');
-}
-
-/**
- * Render a compact Style B timeline for session-start display.
- * @param {Object} plan - Plan entity with metadata.stepsDetail
- * @returns {string} 3-line compact timeline string
- */
-export function renderTimelineCompact(plan) {
- const { stepsDetail, totalSteps, completed = 0 } = plan.metadata;
- if (!stepsDetail || stepsDetail.length === 0 || !totalSteps) return '';
-
- const pct = Math.round((completed / totalSteps) * 100);
- const planName = plan.name.replace('Plan: ', '');
-
- const nodes = stepsDetail.map(s =>
- s.completed ? '\u25cf' : '\u25cb'
- ).join(' \u2500\u2500\u2500\u2500 ');
-
- const next = stepsDetail.find(s => !s.completed);
-
- return [
- ` \ud83d\udccb ${planName}`,
- ` ${nodes} ${pct}%`,
- next ? ` \u25b6 Next: ${next.description}` : ` \ud83c\udf89 Complete`,
- ].join('\n');
-}
-
-// ============================================================================
-// Plan DB Operations
-// ============================================================================
-
-/**
- * Query active plan entities from KG.
- * @param {string} dbPath - Path to SQLite database
- * @returns {Array<{name: string, metadata: object}>}
- */
-export function queryActivePlans(dbPath) {
- try {
- if (!fs.existsSync(dbPath)) return [];
-
- const rows = sqliteQueryJSON(dbPath,
- `SELECT e.name, e.metadata FROM entities e
- JOIN tags t ON t.entity_id = e.id
- WHERE e.type = ? AND t.tag = ?`,
- ['workflow_checkpoint', 'active']
- );
-
- if (!rows) return [];
-
- return rows.map(row => ({
- name: row.name,
- metadata: typeof row.metadata === 'string' ? JSON.parse(row.metadata || '{}') : row.metadata,
- }));
- } catch (error) {
- logError('queryActivePlans', error);
- return [];
- }
-}
-
-/**
- * Add an observation to an existing entity.
- * @param {string} dbPath - Path to SQLite database
- * @param {string} entityName - Entity name
- * @param {string} content - Observation content
- * @returns {boolean}
- */
-export function addObservation(dbPath, entityName, content) {
- const result = sqliteQuery(dbPath,
- `INSERT INTO observations (entity_id, content, created_at)
- SELECT id, ?, ? FROM entities WHERE name = ?`,
- [content, new Date().toISOString(), entityName]
- );
- return result !== null;
-}
-
-/**
- * Update an entity's metadata JSON.
- * @param {string} dbPath - Path to SQLite database
- * @param {string} entityName - Entity name
- * @param {object} metadata - New metadata object
- * @returns {boolean}
- */
-export function updateEntityMetadata(dbPath, entityName, metadata) {
- const result = sqliteQuery(dbPath,
- 'UPDATE entities SET metadata = ? WHERE name = ?',
- [JSON.stringify(metadata), entityName]
- );
- return result !== null;
-}
-
-/**
- * Replace a tag on an entity.
- * @param {string} dbPath - Path to SQLite database
- * @param {string} entityName - Entity name
- * @param {string} oldTag - Tag to replace
- * @param {string} newTag - New tag value
- * @returns {boolean}
- */
-export function updateEntityTag(dbPath, entityName, oldTag, newTag) {
- const result = sqliteQuery(dbPath,
- `UPDATE tags SET tag = ? WHERE tag = ? AND entity_id = (SELECT id FROM entities WHERE name = ?)`,
- [newTag, oldTag, entityName]
- );
- return result !== null;
-}
-
-/**
- * Create a relation between two entities.
- * @param {string} dbPath - Path to SQLite database
- * @param {string} fromName - Source entity name
- * @param {string} toName - Target entity name
- * @param {string} relationType - Relation type (e.g. 'depends_on')
- * @returns {boolean}
- */
-export function createRelation(dbPath, fromName, toName, relationType) {
- const result = sqliteQuery(dbPath,
- `INSERT OR IGNORE INTO relations (from_entity_id, to_entity_id, relation_type, created_at)
- SELECT f.id, t.id, ?, ?
- FROM entities f, entities t
- WHERE f.name = ? AND t.name = ?`,
- [relationType, new Date().toISOString(), fromName, toName]
- );
- return result !== null;
-}
diff --git a/scripts/hooks/post-commit.js b/scripts/hooks/post-commit.js
old mode 100644
new mode 100755
index 4181bf13..3e3adc2d
--- a/scripts/hooks/post-commit.js
+++ b/scripts/hooks/post-commit.js
@@ -1,307 +1,140 @@
#!/usr/bin/env node
-/**
- * PostCommit Hook - Auto-save commit context to MeMesh Knowledge Graph
- *
- * Registered as a PostToolUse hook. Detects git commit commands and
- * saves the commit details (message, files changed, diff summary)
- * to the MeMesh knowledge graph for future recall.
- *
- * Commit saving runs silently. Plan progress prints a timeline to stderr.
- */
-
-import {
- MEMESH_DB_PATH,
- readStdin,
- sqliteBatchEntity,
- getDateString,
- logError,
- logMemorySave,
- queryActivePlans,
- matchCommitToStep,
- addObservation,
- updateEntityMetadata,
- updateEntityTag,
- createRelation,
- renderTimeline,
-} from './hook-utils.js';
-import fs from 'fs';
-import { execFileSync } from 'child_process';
-
-// ============================================================================
-// Git Commit Detection
-// ============================================================================
-
-/**
- * Check if a Bash tool call is a git commit
- * @param {Object} toolData - Normalized tool data
- * @returns {boolean}
- */
-function isGitCommit(toolData) {
- if (toolData.toolName !== 'Bash') return false;
- if (!toolData.success) return false;
-
- const cmd = toolData.arguments?.command || '';
- return /git\s+commit\s/.test(cmd) && !cmd.includes('--amend');
-}
-
-// ============================================================================
-// Git Info Extraction
-// ============================================================================
-
-/**
- * Extract latest commit details using git CLI
- * @returns {{ hash: string, subject: string, body: string, filesChanged: string[], diffStat: string } | null}
- */
-function getLatestCommitInfo() {
- try {
- // Get commit hash and message
- const logOutput = execFileSync('git', [
- 'log', '-1',
- '--format=%H%n%s%n%b',
- ], { encoding: 'utf-8', timeout: 5000 }).trim();
-
- const lines = logOutput.split('\n');
- const hash = lines[0] || '';
- const subject = lines[1] || '';
- const body = lines.slice(2).join('\n').trim();
-
- // Get files changed
- const diffNameOnly = execFileSync('git', [
- 'diff-tree', '--no-commit-id', '--name-only', '-r', 'HEAD',
- ], { encoding: 'utf-8', timeout: 5000 }).trim();
-
- const filesChanged = diffNameOnly ? diffNameOnly.split('\n').filter(Boolean) : [];
-
- // Get diff stat (compact summary)
- const diffStat = execFileSync('git', [
- 'diff-tree', '--no-commit-id', '--stat', 'HEAD',
- ], { encoding: 'utf-8', timeout: 5000 }).trim();
-
- return { hash, subject, body, filesChanged, diffStat };
- } catch (error) {
- logError('getLatestCommitInfo', error);
- return null;
- }
-}
-
-// ============================================================================
-// MeMesh KG Save
-// ============================================================================
-
-/**
- * Save commit context to MeMesh knowledge graph.
- * Uses sqliteBatchEntity for performance (3 spawns instead of 8+).
- * @param {Object} commitInfo - Commit details
- * @returns {boolean} True if saved
- */
-function saveCommitToKG(commitInfo) {
+import { createRequire } from 'module';
+import { homedir } from 'os';
+import { join, basename } from 'path';
+import { existsSync, mkdirSync } from 'fs';
+
+const require = createRequire(import.meta.url);
+
+const SCHEMA_SQL = `
+CREATE TABLE IF NOT EXISTS entities (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL UNIQUE,
+ type TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ metadata JSON
+);
+
+CREATE TABLE IF NOT EXISTS observations (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ entity_id INTEGER NOT NULL,
+ content TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
+);
+
+CREATE TABLE IF NOT EXISTS relations (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ from_entity_id INTEGER NOT NULL,
+ to_entity_id INTEGER NOT NULL,
+ relation_type TEXT NOT NULL,
+ metadata JSON,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (from_entity_id) REFERENCES entities(id) ON DELETE CASCADE,
+ FOREIGN KEY (to_entity_id) REFERENCES entities(id) ON DELETE CASCADE,
+ UNIQUE(from_entity_id, to_entity_id, relation_type)
+);
+
+CREATE TABLE IF NOT EXISTS tags (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ entity_id INTEGER NOT NULL,
+ tag TEXT NOT NULL,
+ FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
+);
+
+CREATE INDEX IF NOT EXISTS idx_tags_entity ON tags(entity_id);
+CREATE INDEX IF NOT EXISTS idx_tags_tag ON tags(tag);
+CREATE INDEX IF NOT EXISTS idx_observations_entity ON observations(entity_id);
+CREATE INDEX IF NOT EXISTS idx_relations_from ON relations(from_entity_id);
+CREATE INDEX IF NOT EXISTS idx_relations_to ON relations(to_entity_id);
+
+CREATE VIRTUAL TABLE IF NOT EXISTS entities_fts USING fts5(
+ name, observations, content='',
+ tokenize='unicode61 remove_diacritics 1'
+);
+`;
+
+let input = '';
+process.stdin.setEncoding('utf8');
+process.stdin.on('data', (chunk) => { input += chunk; });
+process.stdin.on('end', () => {
try {
- if (!fs.existsSync(MEMESH_DB_PATH)) {
- return false;
- }
-
- const { hash, subject, body, filesChanged, diffStat } = commitInfo;
- const shortHash = hash.substring(0, 7);
- const entityName = `Commit ${shortHash}: ${subject}`;
-
- // Build observations
- const observations = [];
- observations.push(`Commit: ${shortHash} - ${subject}`);
-
- if (body) {
- observations.push(`Details: ${body.substring(0, 200)}`);
- }
-
- if (filesChanged.length > 0) {
- const fileList = filesChanged.length <= 10
- ? filesChanged.join(', ')
- : `${filesChanged.slice(0, 10).join(', ')} (+${filesChanged.length - 10} more)`;
- observations.push(`Files changed (${filesChanged.length}): ${fileList}`);
- }
-
- // Group files by directory for context
- if (filesChanged.length > 0) {
- const dirs = {};
- filesChanged.forEach(f => {
- const dir = f.split('/').slice(0, 2).join('/');
- dirs[dir] = (dirs[dir] || 0) + 1;
- });
- const areaSummary = Object.entries(dirs)
- .sort((a, b) => b[1] - a[1])
- .slice(0, 5)
- .map(([dir, count]) => `${dir} (${count})`)
- .join(', ');
- observations.push(`Areas: ${areaSummary}`);
- }
-
- if (diffStat) {
- const statLines = diffStat.split('\n');
- const summaryLine = statLines[statLines.length - 1]?.trim();
- if (summaryLine) {
- observations.push(`Stats: ${summaryLine}`);
+ const data = JSON.parse(input);
+
+ // Only process Bash tool outputs
+ if (data.tool_name !== 'Bash') return exit0();
+ const toolOutput = typeof data.tool_output === 'string'
+ ? data.tool_output
+ : JSON.stringify(data.tool_output || '');
+
+ // Detect git commit in output
+ // Pattern: [branch hash] commit message
+ const commitMatch = toolOutput.match(/\[[\w/.-]+ ([a-f0-9]{7,})\] (.+)/);
+ if (!commitMatch) return exit0();
+
+ const commitHash = commitMatch[1];
+ const commitMsg = commitMatch[2];
+ const projectName = basename(data.cwd || process.cwd());
+
+ // Open database (create dir if needed)
+ const dbPath = process.env.MEMESH_DB_PATH || join(homedir(), '.memesh', 'knowledge-graph.db');
+ const dbDir = process.env.MEMESH_DB_PATH
+ ? join(process.env.MEMESH_DB_PATH, '..')
+ : join(homedir(), '.memesh');
+ if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
+
+ const Database = require('better-sqlite3');
+ const db = new Database(dbPath);
+ try {
+ db.pragma('journal_mode = WAL');
+ db.pragma('foreign_keys = ON');
+
+ // Ensure schema exists
+ db.exec(SCHEMA_SQL);
+
+ const entityName = `commit-${commitHash}`;
+
+ // Check if this is a new or existing entity
+ const insertResult = db.prepare('INSERT OR IGNORE INTO entities (name, type) VALUES (?, ?)').run(entityName, 'commit');
+ const isNew = insertResult.changes > 0;
+ const entity = db.prepare('SELECT id FROM entities WHERE name = ?').get(entityName);
+ if (entity) {
+ // Capture existing observations for FTS delete (before inserting new one)
+ const prevObs = isNew
+ ? []
+ : db.prepare('SELECT content FROM observations WHERE entity_id = ?').all(entity.id);
+ const prevObsText = isNew ? undefined : prevObs.map(o => o.content).join(' ');
+
+ // Add observation
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(entity.id, commitMsg);
+
+ // Add project tag (check first since no unique constraint)
+ const projectTag = `project:${projectName}`;
+ const existingTag = db.prepare('SELECT id FROM tags WHERE entity_id = ? AND tag = ?').get(entity.id, projectTag);
+ if (!existingTag) {
+ db.prepare('INSERT INTO tags (entity_id, tag) VALUES (?, ?)').run(entity.id, projectTag);
+ }
+
+ // Update FTS index — delete old entry first if entity existed
+ if (prevObsText !== undefined) {
+ db.prepare("INSERT INTO entities_fts(entities_fts, rowid, name, observations) VALUES('delete', ?, ?, ?)").run(entity.id, entityName, prevObsText);
+ }
+ // Fetch all observations (including the one just added) for the new FTS entry
+ const allObs = db.prepare('SELECT content FROM observations WHERE entity_id = ?').all(entity.id);
+ const allObsText = allObs.map(o => o.content).join(' ');
+ db.prepare('INSERT INTO entities_fts(rowid, name, observations) VALUES(?, ?, ?)').run(entity.id, entityName, allObsText);
}
+ } finally {
+ db.close();
}
-
- // Batch: entity + observations + tags in 2 process spawns (was 8+)
- const metadata = JSON.stringify({
- hash: shortHash,
- fullHash: hash,
- filesCount: filesChanged.length,
- source: 'post-commit-hook',
- });
-
- const tags = ['commit', 'auto-tracked', `date:${getDateString()}`, 'scope:project'];
-
- const entityId = sqliteBatchEntity(
- MEMESH_DB_PATH,
- { name: entityName, type: 'commit', metadata },
- observations,
- tags
- );
-
- if (entityId === null) return false;
-
- logMemorySave(`Commit saved: ${shortHash} - ${subject} (${filesChanged.length} files)`);
- return true;
- } catch (error) {
- logError('saveCommitToKG', error);
- return false;
+ } catch (err) {
+ // Never crash Claude Code — but leave a trace for debugging
+ try { process.stderr.write(`[memesh post-commit] ${err?.message || err}\n`); } catch {}
}
-}
+ exit0();
+});
-// ============================================================================
-// Plan Progress Tracking (Beta)
-// ============================================================================
-
-/**
- * Match commit to active plan steps and update progress.
- * Prints Style B timeline to stderr (visible to user).
- * @param {Object} commitInfo - Commit details from git
- * @param {boolean} commitEntitySaved - Whether commit entity was saved to KG
- */
-function updatePlanProgress(commitInfo, commitEntitySaved = false) {
- try {
- const activePlans = queryActivePlans(MEMESH_DB_PATH);
- if (activePlans.length === 0) return;
-
- const { hash, subject, filesChanged } = commitInfo;
- const shortHash = hash.substring(0, 7);
- const commitEntityName = `Commit ${shortHash}: ${subject}`;
-
- for (const plan of activePlans) {
- const stepsDetail = plan.metadata?.stepsDetail;
- if (!stepsDetail || stepsDetail.length === 0) continue;
-
- const matchResult = matchCommitToStep(
- { subject, filesChanged },
- stepsDetail
- );
-
- if (!matchResult) continue;
-
- const { step: matched, confidence } = matchResult;
-
- // Update step as completed
- const updatedSteps = stepsDetail.map(s =>
- s.number === matched.number
- ? { ...s, completed: true, commitHash: shortHash, date: getDateString(), confidence }
- : s
- );
- const rawCompleted = updatedSteps.filter(s => s.completed).length;
- const newCompleted = Math.min(rawCompleted, plan.metadata.totalSteps);
-
- // Update entity metadata
- updateEntityMetadata(MEMESH_DB_PATH, plan.name, {
- ...plan.metadata,
- completed: newCompleted,
- stepsDetail: updatedSteps,
- status: newCompleted >= plan.metadata.totalSteps ? 'completed' : 'active',
- });
-
- // Add completion observation
- addObservation(MEMESH_DB_PATH, plan.name,
- `\u2705 Step ${matched.number} completed by ${shortHash} (${getDateString()})`
- );
-
- // Create relation: commit → plan (only if commit entity exists in KG)
- if (commitEntitySaved) {
- createRelation(MEMESH_DB_PATH, commitEntityName, plan.name, 'depends_on');
- }
-
- // If all steps completed, swap tag and create lesson_learned
- if (newCompleted === plan.metadata.totalSteps) {
- updateEntityTag(MEMESH_DB_PATH, plan.name, 'active', 'completed');
- addObservation(MEMESH_DB_PATH, plan.name,
- `\ud83c\udf89 Plan completed on ${getDateString()}`
- );
-
- // Auto-create lesson_learned entity for the completed plan
- const planName = plan.name.replace('Plan: ', '');
- const completedSteps = updatedSteps.filter(s => s.completed);
- const lessonObs = [
- `Plan "${planName}" completed (${plan.metadata.totalSteps} steps)`,
- `Steps: ${completedSteps.map(s => s.description).join(', ')}`,
- `Commits: ${completedSteps.filter(s => s.commitHash).map(s => s.commitHash).join(', ')}`,
- ];
- sqliteBatchEntity(MEMESH_DB_PATH,
- { name: `Lesson: ${planName} completed`, type: 'lesson_learned', metadata: '{}' },
- lessonObs,
- ['lesson', 'plan-completion', `plan:${planName}`, 'scope:project']
- );
- }
-
- // Print timeline to stderr (visible to user — stdout goes to transcript)
- const timelinePlan = {
- ...plan,
- metadata: { ...plan.metadata, completed: newCompleted, stepsDetail: updatedSteps },
- _lastCommit: shortHash,
- _matchConfidence: confidence,
- };
- console.error('\n' + renderTimeline(timelinePlan, matched.number) + '\n');
-
- logMemorySave(`Plan progress: ${plan.name} ${newCompleted}/${plan.metadata.totalSteps}`);
- }
- } catch (error) {
- logError('updatePlanProgress', error);
- }
-}
-
-// ============================================================================
-// Main
-// ============================================================================
-
-async function postCommit() {
- try {
- const input = await readStdin(3000);
- if (!input || input.trim() === '') {
- process.exit(0);
- }
-
- const rawData = JSON.parse(input);
- const toolData = {
- toolName: rawData.tool_name || rawData.toolName || 'unknown',
- arguments: rawData.tool_input || rawData.arguments || {},
- success: rawData.success !== false,
- };
-
- // Only act on successful git commits
- if (!isGitCommit(toolData)) {
- process.exit(0);
- }
-
- // Extract commit info and save
- const commitInfo = getLatestCommitInfo();
- if (commitInfo) {
- const saved = saveCommitToKG(commitInfo);
- updatePlanProgress(commitInfo, saved);
- }
-
- process.exit(0);
- } catch (error) {
- logError('PostCommit', error);
- process.exit(0); // Never block Claude Code on hook errors
- }
+function exit0() {
+ process.exit(0);
}
-
-postCommit();
diff --git a/scripts/hooks/post-tool-use-recall-utils.js b/scripts/hooks/post-tool-use-recall-utils.js
deleted file mode 100644
index 18a351b4..00000000
--- a/scripts/hooks/post-tool-use-recall-utils.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Post-Tool-Use Recall Utilities
- *
- * Pure functions for detecting test failures and errors in tool output,
- * used by the proactive recall system in post-tool-use.js.
- */
-
-const TEST_PATTERNS = [
- /vitest\s*(run|watch)?/i,
- /jest\b/i,
- /npm\s+test/i,
- /npm\s+run\s+test/i,
- /npx\s+vitest/i,
- /npx\s+jest/i,
- /pytest\b/i,
- /bun\s+test/i,
- /mocha\b/i,
-];
-
-/**
- * Check if a command string is a test runner invocation.
- * @param {string} command - Shell command to check
- * @returns {boolean}
- */
-export function isTestCommand(command) {
- if (!command) return false;
- return TEST_PATTERNS.some(p => p.test(command));
-}
-
-/**
- * Extract test failure context (test name + error message) from test output.
- * Returns null if no failure is detected.
- * @param {string} output - Test runner stdout/stderr
- * @returns {{ testName: string, errorMessage: string } | null}
- */
-export function extractTestFailureContext(output) {
- if (!output) return null;
- const hasFailure = /FAIL|failed|failing|\u2715|\u2717|error/i.test(output);
- if (!hasFailure) return null;
-
- const fileMatch = output.match(/FAIL\s+(\S+\.(?:test|spec)\.\S+)/i)
- || output.match(/(\S+\.(?:test|spec)\.\S+)/i);
- const testName = fileMatch ? fileMatch[1] : 'unknown test';
-
- const errorMatch = output.match(/(?:Error|AssertionError|AssertError|expect).*$/m)
- || output.match(/(?:\u2715|\u2717)\s*(.+)$/m);
- const errorMessage = errorMatch ? errorMatch[0].trim() : 'test failed';
-
- return { testName, errorMessage };
-}
-
-/**
- * Build a search query from a test failure context.
- * Strips path and test/spec suffix from the test file name.
- * @param {string} testName - Test file name or path
- * @param {string} errorMessage - Error message
- * @returns {string}
- */
-export function buildTestFailureQuery(testName, errorMessage) {
- const shortName = testName.replace(/^.*[/\\]/, '').replace(/\.(test|spec)\.\w+$/, '');
- return `${shortName} ${errorMessage}`.trim();
-}
-
-/**
- * Build a search query from an error type and message.
- * Uses only the first line of the error message.
- * @param {string} errorType - Error class name (e.g. "TypeError")
- * @param {string} errorMessage - Error message (may be multiline)
- * @returns {string}
- */
-export function buildErrorQuery(errorType, errorMessage) {
- const firstLine = (errorMessage || '').split('\n')[0].trim();
- return `${errorType || 'Error'} ${firstLine}`.trim();
-}
diff --git a/scripts/hooks/post-tool-use.js b/scripts/hooks/post-tool-use.js
deleted file mode 100755
index 67e73c39..00000000
--- a/scripts/hooks/post-tool-use.js
+++ /dev/null
@@ -1,895 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * PostToolUse Hook - Claude Code Event-Driven Hooks
- *
- * Triggered after each tool execution in Claude Code.
- *
- * Features (Silent Observer):
- * - Reads tool execution data from stdin
- * - Detects patterns (READ_BEFORE_EDIT, Git workflows, Frontend work, Search patterns)
- * - Detects anomalies (slow execution, high tokens, failures, quota warnings)
- * - Updates recommendations.json incrementally
- * - Updates current-session.json
- * - Auto-saves key points to MeMesh when token threshold reached
- * - Runs silently (no console output - non-intrusive)
- */
-
-import {
- STATE_DIR,
- MEMESH_DB_PATH,
- THRESHOLDS,
- readJSONFile,
- writeJSONFile,
- writeJSONFileAsync,
- sqliteBatchEntity,
- readStdin,
- logError,
- logMemorySave,
- getDateString,
- isPlanFile,
- parsePlanSteps,
- derivePlanName,
- sqliteQueryJSON,
- updateEntityMetadata,
- addObservation,
-} from './hook-utils.js';
-import { isTestCommand, extractTestFailureContext, buildTestFailureQuery, buildErrorQuery } from './post-tool-use-recall-utils.js';
-import fs from 'fs';
-import path from 'path';
-
-// ============================================================================
-// File Paths
-// ============================================================================
-
-const RECOMMENDATIONS_FILE = path.join(STATE_DIR, 'recommendations.json');
-const CURRENT_SESSION_FILE = path.join(STATE_DIR, 'current-session.json');
-const SESSION_CONTEXT_FILE = path.join(STATE_DIR, 'session-context.json');
-
-// ============================================================================
-// Pattern Detection
-// ============================================================================
-
-/**
- * Pattern Detector - Analyzes tool usage patterns
- */
-class PatternDetector {
- constructor() {
- this.recentTools = [];
- }
-
- /**
- * Add a tool call to the recent tools list
- * @param {Object} toolData - Tool execution data
- */
- addToolCall(toolData) {
- this.recentTools.push({
- toolName: toolData.toolName,
- args: toolData.arguments,
- timestamp: new Date().toISOString(),
- });
-
- // Keep only last 10 (using slice instead of shift for better performance)
- if (this.recentTools.length > 10) {
- this.recentTools = this.recentTools.slice(-10);
- }
- }
-
- /**
- * Detect patterns in recent tool usage
- * @param {Object} currentTool - Current tool execution data
- * @returns {Array} Detected patterns
- */
- detectPatterns(currentTool) {
- const patterns = [];
-
- if (!currentTool || !currentTool.toolName) {
- return patterns;
- }
-
- const toolArgs = currentTool.arguments || {};
-
- // Pattern 1: READ_BEFORE_EDIT
- if (currentTool.toolName === 'Edit') {
- const filePath = toolArgs.file_path;
- if (filePath) {
- const recentReads = this.recentTools.filter(t =>
- t.toolName === 'Read' && t.args?.file_path === filePath
- );
- if (recentReads.length > 0) {
- patterns.push({
- type: 'READ_BEFORE_EDIT',
- description: 'Read before Edit - correct behavior',
- severity: 'info',
- });
- } else {
- patterns.push({
- type: 'EDIT_WITHOUT_READ',
- description: `Edit ${path.basename(filePath)} without prior Read`,
- severity: 'warning',
- });
- }
- }
- }
-
- // Pattern 2: Git Workflow
- const gitCommands = ['git add', 'git commit', 'git push', 'git branch', 'git checkout'];
- if (currentTool.toolName === 'Bash' && toolArgs.command) {
- const cmd = toolArgs.command;
- if (gitCommands.some(gitCmd => cmd.includes(gitCmd))) {
- const recentGitOps = this.recentTools.filter(t =>
- t.toolName === 'Bash' && gitCommands.some(gc => t.args?.command?.includes(gc))
- ).length;
-
- if (recentGitOps >= 2) {
- patterns.push({
- type: 'GIT_WORKFLOW',
- description: `Git workflow detected (${recentGitOps + 1} operations)`,
- severity: 'info',
- suggestion: 'Consider loading devops-git-workflows skill',
- });
- }
- }
- }
-
- // Pattern 3: Frontend Work
- const frontendExtensions = ['.tsx', '.jsx', '.vue', '.svelte', '.css', '.scss'];
- if (['Edit', 'Write', 'Read'].includes(currentTool.toolName)) {
- const filePath = toolArgs.file_path;
- if (filePath && frontendExtensions.some(ext => filePath.endsWith(ext))) {
- const recentFrontendOps = this.recentTools.filter(t =>
- ['Edit', 'Write', 'Read'].includes(t.toolName) &&
- frontendExtensions.some(ext => t.args?.file_path?.endsWith(ext))
- ).length;
-
- if (recentFrontendOps >= 2) {
- patterns.push({
- type: 'FRONTEND_WORK',
- description: `Frontend work detected (${recentFrontendOps + 1} files)`,
- severity: 'info',
- suggestion: 'Consider loading frontend-design skill',
- });
- }
- }
- }
-
- // Pattern 4: Intensive Search
- if (['Grep', 'Glob'].includes(currentTool.toolName)) {
- const recentSearches = this.recentTools.filter(t =>
- ['Grep', 'Glob'].includes(t.toolName)
- ).length;
-
- if (recentSearches >= 3) {
- patterns.push({
- type: 'INTENSIVE_SEARCH',
- description: `Multiple search operations (${recentSearches + 1} times)`,
- severity: 'info',
- });
- }
- }
-
- return patterns;
- }
-}
-
-// ============================================================================
-// Anomaly Detection
-// ============================================================================
-
-/**
- * Detect anomalies in tool execution
- * @param {Object} toolData - Tool execution data
- * @param {Object} sessionContext - Session context with quota info
- * @returns {Array} Detected anomalies
- */
-function detectAnomalies(toolData, sessionContext) {
- const anomalies = [];
-
- // Anomaly 1: Slow Execution
- if (toolData.duration && toolData.duration > THRESHOLDS.SLOW_EXECUTION) {
- anomalies.push({
- type: 'SLOW_EXECUTION',
- description: `${toolData.toolName} took ${(toolData.duration / 1000).toFixed(1)}s (slow)`,
- severity: 'warning',
- });
- }
-
- // Anomaly 2: High Token Usage
- if (toolData.tokensUsed && toolData.tokensUsed > THRESHOLDS.HIGH_TOKENS) {
- anomalies.push({
- type: 'HIGH_TOKENS',
- description: `${toolData.toolName} used ${toolData.tokensUsed} tokens (high usage)`,
- severity: 'warning',
- });
- }
-
- // Anomaly 3: Execution Failure
- if (toolData.success === false) {
- anomalies.push({
- type: 'EXECUTION_FAILURE',
- description: `${toolData.toolName} execution failed`,
- severity: 'error',
- });
- }
-
- // Anomaly 4: Quota Warning
- if (sessionContext.tokenQuota) {
- const quotaUsed = sessionContext.tokenQuota.used + (toolData.tokensUsed || 0);
- const quotaPercentage = quotaUsed / sessionContext.tokenQuota.limit;
-
- if (quotaPercentage > THRESHOLDS.QUOTA_WARNING) {
- anomalies.push({
- type: 'QUOTA_WARNING',
- description: `Token quota at ${(quotaPercentage * 100).toFixed(1)}%`,
- severity: 'warning',
- });
- }
- }
-
- return anomalies;
-}
-
-// ============================================================================
-// Recommendations Update
-// ============================================================================
-
-/**
- * Update recommendations based on detected patterns and anomalies
- * @param {Array} patterns - Detected patterns
- * @param {Array} anomalies - Detected anomalies
- */
-function updateRecommendations(patterns, anomalies) {
- const recommendations = readJSONFile(RECOMMENDATIONS_FILE, {
- recommendedSkills: [],
- detectedPatterns: [],
- warnings: [],
- lastUpdated: null,
- });
-
- // Add new skills based on patterns
- patterns.forEach(pattern => {
- if (pattern.suggestion && pattern.suggestion.includes('skill')) {
- const skillMatch = pattern.suggestion.match(/loading\s+(.+?)\s+skill/);
- if (skillMatch) {
- const skillName = skillMatch[1];
- const existing = recommendations.recommendedSkills.find(s => s.name === skillName);
- if (!existing) {
- recommendations.recommendedSkills.push({
- name: skillName,
- reason: pattern.description,
- priority: 'medium',
- });
- }
- }
- }
- });
-
- // Add detected patterns (keep last 10)
- patterns.forEach(pattern => {
- if (pattern && pattern.description) {
- recommendations.detectedPatterns.unshift({
- description: pattern.description,
- suggestion: pattern.suggestion || '',
- timestamp: new Date().toISOString(),
- });
- }
- });
- recommendations.detectedPatterns = recommendations.detectedPatterns.slice(0, 10);
-
- // Add warnings from anomalies (keep last 5)
- anomalies
- .filter(a => a.severity === 'warning' || a.severity === 'error')
- .forEach(anomaly => {
- recommendations.warnings.unshift(anomaly.description);
- });
- recommendations.warnings = recommendations.warnings.slice(0, 5);
-
- recommendations.lastUpdated = new Date().toISOString();
-
- // Async write — non-blocking, caller awaits via pendingWrites
- return writeJSONFileAsync(RECOMMENDATIONS_FILE, recommendations);
-}
-
-// ============================================================================
-// Session Update
-// ============================================================================
-
-/**
- * Update current session with tool call data
- * @param {Object} toolData - Tool execution data
- * @param {Array} patterns - Detected patterns
- * @param {Array} anomalies - Detected anomalies
- */
-function updateCurrentSession(toolData, patterns, anomalies) {
- const currentSession = readJSONFile(CURRENT_SESSION_FILE, {
- startTime: new Date().toISOString(),
- toolCalls: [],
- patterns: [],
- });
-
- // Add tool call record
- currentSession.toolCalls.push({
- timestamp: new Date().toISOString(),
- toolName: toolData.toolName,
- arguments: toolData.arguments,
- duration: toolData.duration,
- success: toolData.success,
- tokenUsage: toolData.tokensUsed,
- });
-
- // Cap toolCalls to prevent unbounded growth in long sessions
- const MAX_TOOL_CALLS = 1000;
- if (currentSession.toolCalls.length > MAX_TOOL_CALLS) {
- currentSession.toolCalls = currentSession.toolCalls.slice(-MAX_TOOL_CALLS);
- }
-
- // Track file modifications and test executions (for dry-run gate)
- trackFileModifications(toolData, currentSession);
- trackTestExecutions(toolData, currentSession);
-
- // Update pattern counts
- patterns.forEach(pattern => {
- const existing = currentSession.patterns.find(p => p.type === pattern.type);
- if (!existing) {
- currentSession.patterns.push({
- type: pattern.type,
- count: 1,
- firstDetected: new Date().toISOString(),
- });
- } else {
- existing.count++;
- }
- });
-
- // Async write — session file is read on next call, eventual consistency is fine
- // Return promise so caller can track it in pendingWrites
- const writePromise = writeJSONFileAsync(CURRENT_SESSION_FILE, currentSession);
-
- return { session: currentSession, writePromise };
-}
-
-/**
- * Update session context (quota tracking)
- * @param {Object} toolData - Tool execution data
- * @returns {{ sessionContext: Object, writePromise: Promise }}
- */
-function updateSessionContext(toolData) {
- const sessionContext = readJSONFile(SESSION_CONTEXT_FILE, {
- tokenQuota: { used: 0, limit: 200000 },
- learnedPatterns: [],
- lastSessionDate: null,
- lastSaveTokens: 0,
- });
-
- // Update token usage
- if (toolData.tokensUsed) {
- sessionContext.tokenQuota.used += toolData.tokensUsed;
- }
-
- sessionContext.lastSessionDate = new Date().toISOString();
-
- const writePromise = writeJSONFileAsync(SESSION_CONTEXT_FILE, sessionContext);
-
- return { sessionContext, writePromise };
-}
-
-// ============================================================================
-// MeMesh Key Points Auto-Save
-// ============================================================================
-
-/**
- * Extract key points from session state
- * @param {Object} sessionState - Current session state
- * @returns {Array} Extracted key points
- */
-function extractKeyPoints(sessionState) {
- const keyPoints = [];
-
- if (!sessionState?.toolCalls?.length) {
- return keyPoints;
- }
-
- // 1. Identify completed file operations
- const fileOps = {};
- sessionState.toolCalls.forEach(tc => {
- if (['Edit', 'Write'].includes(tc.toolName) && tc.arguments?.file_path) {
- const filePath = tc.arguments.file_path;
- fileOps[filePath] = (fileOps[filePath] || 0) + 1;
- }
- });
-
- const modifiedFiles = Object.keys(fileOps);
- if (modifiedFiles.length > 0) {
- const summary = modifiedFiles.length > 5
- ? `${modifiedFiles.slice(0, 5).map(f => path.basename(f)).join(', ')} (+${modifiedFiles.length - 5} more)`
- : modifiedFiles.map(f => path.basename(f)).join(', ');
- keyPoints.push(`[TASK] Modified files: ${summary}`);
- }
-
- // 2. Identify failures
- const failures = sessionState.toolCalls.filter(tc => tc.success === false);
- if (failures.length > 0) {
- const failedTools = [...new Set(failures.map(f => f.toolName))];
- keyPoints.push(`[PROBLEM] ${failures.length} tool failures: ${failedTools.join(', ')}`);
- }
-
- // 3. Git operations
- const gitCommits = sessionState.toolCalls.filter(tc =>
- tc.toolName === 'Bash' && tc.arguments?.command?.includes('git commit')
- );
- if (gitCommits.length > 0) {
- keyPoints.push(`[DECISION] Made ${gitCommits.length} git commit(s)`);
- }
-
- // 4. Detected patterns
- if (sessionState.patterns?.length > 0) {
- const patternSummary = sessionState.patterns
- .filter(p => p.count > 2)
- .map(p => `${p.type}(${p.count})`)
- .join(', ');
- if (patternSummary) {
- keyPoints.push(`[PATTERN] Recurring patterns: ${patternSummary}`);
- }
- }
-
- // 5. Work scope indicator
- const toolCounts = {};
- sessionState.toolCalls.forEach(tc => {
- toolCounts[tc.toolName] = (toolCounts[tc.toolName] || 0) + 1;
- });
- const topTools = Object.entries(toolCounts)
- .sort((a, b) => b[1] - a[1])
- .slice(0, 3)
- .map(([tool, count]) => `${tool}:${count}`)
- .join(', ');
- keyPoints.push(`[SCOPE] Tool usage: ${topTools}, total: ${sessionState.toolCalls.length}`);
-
- return keyPoints;
-}
-
-/**
- * Save conversation key points to MeMesh knowledge graph.
- * Uses sqliteBatchEntity for performance (3 spawns instead of N).
- * @param {Object} sessionState - Current session state
- * @param {Object} sessionContext - Session context
- * @returns {boolean} True if saved successfully
- */
-function saveConversationKeyPoints(sessionState, sessionContext) {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) {
- logError('saveConversationKeyPoints', `MeMesh DB not found: ${MEMESH_DB_PATH}`);
- return false;
- }
-
- const keyPoints = extractKeyPoints(sessionState);
- if (keyPoints.length === 0) {
- return false;
- }
-
- const entityName = `session_keypoints_${Date.now()}`;
-
- const metadata = JSON.stringify({
- tokensSaved: sessionContext.tokenQuota?.used || 0,
- toolCount: sessionState.toolCalls?.length || 0,
- saveReason: 'token_threshold',
- });
-
- const tags = ['auto_saved', 'token_trigger', getDateString()];
-
- const entityId = sqliteBatchEntity(
- MEMESH_DB_PATH,
- { name: entityName, type: 'session_keypoint', metadata },
- keyPoints,
- tags
- );
-
- if (entityId === null) {
- return false;
- }
-
- logMemorySave(`🧠 MeMesh: Saved ${keyPoints.length} key points (tokens: ${sessionContext.tokenQuota?.used})`);
-
- return true;
- } catch (error) {
- logError('saveConversationKeyPoints', error);
- return false;
- }
-}
-
-/**
- * Check if token threshold reached and save key points
- * @param {Object} sessionState - Current session state
- * @param {Object} sessionContext - Session context
- * @returns {boolean} True if saved
- */
-function checkAndSaveKeyPoints(sessionState, sessionContext) {
- try {
- const lastSaveTokens = sessionContext.lastSaveTokens || 0;
- const currentTokens = sessionContext.tokenQuota?.used || 0;
- const tokensSinceLastSave = currentTokens - lastSaveTokens;
-
- if (tokensSinceLastSave >= THRESHOLDS.TOKEN_SAVE) {
- const saved = saveConversationKeyPoints(sessionState, sessionContext);
-
- if (saved) {
- sessionContext.lastSaveTokens = currentTokens;
- writeJSONFile(SESSION_CONTEXT_FILE, sessionContext);
- }
-
- return saved;
- }
-
- return false;
- } catch (error) {
- logError('checkAndSaveKeyPoints', error);
- return false;
- }
-}
-
-// ============================================================================
-// File Modification & Test Tracking (for dry-run gate in pre-tool-use.js)
-// ============================================================================
-
-/**
- * Track file modifications from Write/Edit tool calls.
- * Stores modified file paths in session state.
- * @param {Object} toolData - Normalized tool data
- * @param {Object} currentSession - Current session state (mutated in place)
- */
-function trackFileModifications(toolData, currentSession) {
- if (!['Edit', 'Write'].includes(toolData.toolName)) return;
-
- const filePath = toolData.arguments?.file_path;
- if (!filePath) return;
-
- if (!currentSession.modifiedFiles) {
- currentSession.modifiedFiles = [];
- }
-
- const MAX_MODIFIED_FILES = 100;
- if (!currentSession.modifiedFiles.includes(filePath)) {
- if (currentSession.modifiedFiles.length >= MAX_MODIFIED_FILES) {
- currentSession.modifiedFiles.shift(); // Remove oldest entry
- }
- currentSession.modifiedFiles.push(filePath);
- }
-}
-
-/** Patterns that indicate test execution in a Bash command */
-const TEST_PATTERNS = [
- /vitest\s+(run|watch)?/,
- /jest\b/,
- /npm\s+test/,
- /npm\s+run\s+test/,
- /npx\s+vitest/,
- /npx\s+jest/,
- /tsc\s+--noEmit/,
- /node\s+--check\s/,
- /bun\s+test/,
- /pytest\b/,
-];
-
-/**
- * Track test executions from Bash tool calls.
- * Marks tested files/directories in session state.
- * @param {Object} toolData - Normalized tool data
- * @param {Object} currentSession - Current session state (mutated in place)
- */
-function trackTestExecutions(toolData, currentSession) {
- if (toolData.toolName !== 'Bash') return;
- if (!toolData.success) return;
-
- const cmd = toolData.arguments?.command || '';
- const isTest = TEST_PATTERNS.some(pattern => pattern.test(cmd));
- if (!isTest) return;
-
- if (!currentSession.testedFiles) {
- currentSession.testedFiles = [];
- }
-
- currentSession.lastTestRun = new Date().toISOString();
-
- // Extract test target path if provided
- // e.g., "vitest run src/auth" → mark all modified files under src/auth/ as tested
- const pathMatch = cmd.match(/(?:vitest|jest|node\s+--check)\s+(?:run\s+)?(\S+)/);
- const testTarget = pathMatch ? pathMatch[1] : null;
-
- if (testTarget && currentSession.modifiedFiles) {
- // Mark files under the test target directory/path as tested
- // Use path-prefix match: "src/auth" matches "src/auth/middleware.ts" but NOT "src/auth-utils/helper.ts"
- for (const modFile of currentSession.modifiedFiles) {
- const isMatch = modFile === testTarget ||
- modFile.startsWith(testTarget + '/') ||
- modFile.startsWith(testTarget + path.sep);
- if (isMatch && !currentSession.testedFiles.includes(modFile)) {
- currentSession.testedFiles.push(modFile);
- }
- }
- } else if (!testTarget && currentSession.modifiedFiles) {
- // Full test run (no specific target) — mark all modified files as tested
- for (const modFile of currentSession.modifiedFiles) {
- if (!currentSession.testedFiles.includes(modFile)) {
- currentSession.testedFiles.push(modFile);
- }
- }
- }
-}
-
-// ============================================================================
-// Code Review Tracking
-// ============================================================================
-
-/**
- * Check if this tool call is a code review invocation.
- * Detects both Skill tool usage and Task tool dispatching code reviewers.
- * @param {Object} toolData - Normalized tool data
- * @returns {boolean}
- */
-function isCodeReviewInvocation(toolData) {
- // Skill tool with code review
- if (toolData.toolName === 'Skill') {
- const name = toolData.arguments?.name || toolData.arguments?.skill_name || '';
- return /code.?review|comprehensive.?code.?review/i.test(name);
- }
-
- // Task tool dispatching code reviewer subagent
- if (toolData.toolName === 'Task') {
- const subagentType = toolData.arguments?.subagent_type || '';
- return /code.?review/i.test(subagentType);
- }
-
- return false;
-}
-
-/**
- * Mark code review as done in session state.
- * This flag is checked by pre-tool-use.js before git commits.
- */
-function markCodeReviewDone() {
- const session = readJSONFile(CURRENT_SESSION_FILE, {});
- session.codeReviewDone = true;
- session.codeReviewTimestamp = new Date().toISOString();
- writeJSONFile(CURRENT_SESSION_FILE, session);
-}
-
-// ============================================================================
-// Tool Data Normalization
-// ============================================================================
-
-/**
- * Normalize tool data from Claude Code format
- * @param {Object} raw - Raw tool data from stdin
- * @returns {Object} Normalized tool data
- */
-function normalizeToolData(raw) {
- return {
- toolName: raw.tool_name || raw.toolName || 'unknown',
- arguments: raw.tool_input || raw.arguments || {},
- duration: raw.duration_ms || raw.duration || 0,
- success: raw.success !== false,
- tokensUsed: raw.tokens_used || raw.tokensUsed || 0,
- _raw: raw,
- };
-}
-
-// ============================================================================
-// Plan File Detection (Beta)
-// ============================================================================
-
-/**
- * Detect plan file creation and save to KG.
- * Triggered when Write tool targets a plan file path.
- * @param {Object} toolData - Normalized tool data
- */
-function detectPlanFile(toolData) {
- if (toolData.toolName !== 'Write') return;
-
- const filePath = toolData.arguments?.file_path;
- if (!isPlanFile(filePath)) return;
-
- try {
- // Read the file content
- if (!fs.existsSync(filePath)) return;
- const content = fs.readFileSync(filePath, 'utf-8');
-
- const steps = parsePlanSteps(content);
- if (steps.length === 0) return;
-
- if (!fs.existsSync(MEMESH_DB_PATH)) return;
-
- const planName = derivePlanName(filePath);
- const entityName = `Plan: ${planName}`;
-
- const newMetadata = {
- sourceFile: filePath,
- totalSteps: steps.length,
- completed: steps.filter(s => s.completed).length,
- status: 'active',
- stepsDetail: steps,
- };
-
- // Check if plan entity already exists (re-save scenario)
- const existing = sqliteQueryJSON(MEMESH_DB_PATH,
- 'SELECT id FROM entities WHERE name = ?', [entityName]);
-
- if (existing && existing.length > 0) {
- // Upsert: update metadata and add observation for the re-save
- updateEntityMetadata(MEMESH_DB_PATH, entityName, newMetadata);
- addObservation(MEMESH_DB_PATH, entityName,
- `Plan re-saved: ${steps.length} steps (${steps.filter(s => s.completed).length} completed)`);
- logMemorySave(`Plan updated: ${planName} (${steps.length} steps)`);
- } else {
- // First save: create entity with observations and tags
- const observations = steps.map(s => `Step ${s.number}: ${s.description}`);
- const tags = ['plan', 'active', `plan:${planName}`, 'scope:project'];
-
- sqliteBatchEntity(MEMESH_DB_PATH,
- { name: entityName, type: 'workflow_checkpoint', metadata: JSON.stringify(newMetadata) },
- observations, tags
- );
- logMemorySave(`Plan detected: ${planName} (${steps.length} steps)`);
- }
- } catch (error) {
- logError('detectPlanFile', error);
- }
-}
-
-// ============================================================================
-// Proactive Recall on Test Failure / Error Detection
-// ============================================================================
-
-/**
- * Trigger proactive recall on test failure or error detection.
- * Writes results to proactive-recall.json for HookToolHandler.
- * @param {Object} toolData - Normalized tool data
- */
-function triggerProactiveRecall(toolData) {
- try {
- const recallFile = path.join(STATE_DIR, 'proactive-recall.json');
- let query = null;
- let trigger = null;
-
- // Test failure detection
- if (toolData.toolName === 'Bash' && toolData.arguments?.command) {
- if (isTestCommand(toolData.arguments.command) && !toolData.success) {
- const ctx = extractTestFailureContext(toolData._raw?.output || '');
- if (ctx) {
- query = buildTestFailureQuery(ctx.testName, ctx.errorMessage);
- trigger = 'test-failure';
- }
- }
- }
-
- // Error detection (non-test failures)
- if (!trigger && !toolData.success && toolData._raw?.output) {
- const errorMatch = toolData._raw.output.match(/(\w*Error):\s*(.+)/);
- if (errorMatch) {
- query = buildErrorQuery(errorMatch[1], errorMatch[2]);
- trigger = 'error-detection';
- }
- }
-
- if (!query || !trigger) return;
-
- // Build FTS5 query
- const ftsTokens = query.split(/\s+/)
- .filter(t => t.length > 2)
- .slice(0, 8)
- .map(t => `"${t.replace(/"/g, '""')}"*`)
- .join(' OR ');
-
- if (!ftsTokens) return;
-
- const sql = `
- SELECT e.name,
- (SELECT json_group_array(content) FROM observations o WHERE o.entity_id = e.id) as observations_json
- FROM entities e
- JOIN entities_fts ON entities_fts.rowid = e.id
- WHERE entities_fts MATCH ?
- ORDER BY bm25(entities_fts, 10.0, 5.0)
- LIMIT 3
- `;
-
- const result = sqliteQueryJSON(MEMESH_DB_PATH, sql, [ftsTokens]);
- if (!result || result.length === 0) return;
-
- const recallData = {
- trigger,
- query,
- timestamp: Date.now(),
- results: result.map(r => ({
- name: r.name,
- observations: JSON.parse(r.observations_json || '[]').filter(Boolean).slice(0, 2),
- })),
- };
-
- writeJSONFile(recallFile, recallData);
- } catch (error) {
- logError('proactive-recall-trigger', error);
- }
-}
-
-// ============================================================================
-// Main PostToolUse Logic
-// ============================================================================
-
-async function postToolUse() {
- try {
- // Read stdin with timeout
- const input = await readStdin(3000);
-
- if (!input || input.trim() === '') {
- process.exit(0);
- }
-
- // Parse and normalize tool data
- const rawData = JSON.parse(input);
- const toolData = normalizeToolData(rawData);
-
- // Track code review invocations (for pre-commit enforcement)
- if (isCodeReviewInvocation(toolData)) {
- markCodeReviewDone();
- }
-
- // Detect plan file creation (beta)
- detectPlanFile(toolData);
-
- // Initialize pattern detector
- const detector = new PatternDetector();
-
- // Load recent tools from current session
- const currentSession = readJSONFile(CURRENT_SESSION_FILE, { toolCalls: [] });
- currentSession.toolCalls.slice(-10).forEach(tc => {
- detector.addToolCall({
- toolName: tc.toolName,
- arguments: tc.arguments || {},
- });
- });
-
- // Add current tool
- detector.addToolCall(toolData);
-
- // Detect patterns
- const patterns = detector.detectPatterns(toolData);
-
- // Update session context (for quota tracking) — returns sync data + async write promise
- const { sessionContext, writePromise: contextWritePromise } = updateSessionContext(toolData);
-
- // Detect anomalies
- const anomalies = detectAnomalies(toolData, sessionContext);
-
- // Trigger proactive recall on test failure or error
- triggerProactiveRecall(toolData);
-
- // Fire async writes in parallel
- const pendingWrites = [contextWritePromise];
-
- // Update recommendations incrementally
- if (patterns.length > 0 || anomalies.length > 0) {
- pendingWrites.push(updateRecommendations(patterns, anomalies));
- }
-
- // Update current session (async write)
- const { session: updatedSession, writePromise: sessionWritePromise } =
- updateCurrentSession(toolData, patterns, anomalies);
- pendingWrites.push(sessionWritePromise);
-
- // Check token threshold and save key points if needed
- checkAndSaveKeyPoints(updatedSession, sessionContext);
-
- // Wait for all async writes to complete before exit
- await Promise.all(pendingWrites);
-
- // Silent exit
- process.exit(0);
- } catch (error) {
- logError('PostToolUse', error);
- process.exit(0); // Never block Claude Code on hook errors
- }
-}
-
-// ============================================================================
-// Execute
-// ============================================================================
-
-postToolUse();
diff --git a/scripts/hooks/pre-tool-use.js b/scripts/hooks/pre-tool-use.js
deleted file mode 100644
index 6986c03f..00000000
--- a/scripts/hooks/pre-tool-use.js
+++ /dev/null
@@ -1,491 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * PreToolUse Hook - Modular Handler Architecture
- *
- * Triggered before each tool execution in Claude Code.
- *
- * Handlers (each returns partial response or null):
- * 1. codeReviewHandler — git commit → review reminder
- * 2. routingHandler — Task → model/background selection
- * 3. planningHandler — Task(Plan)/EnterPlanMode → SDD+BDD template
- * 4. dryRunGateHandler — Task → untested code warning
- *
- * Response Merger combines all handler outputs into a single JSON response:
- * - updatedInput: deep-merged
- * - additionalContext: concatenated
- * - permissionDecision: most-restrictive-wins
- */
-
-import {
- HOME_DIR,
- STATE_DIR,
- readJSONFile,
- readStdin,
- logError,
-} from './hook-utils.js';
-import fs from 'fs';
-import path from 'path';
-import { fileURLToPath } from 'url';
-
-// ============================================================================
-// Constants
-// ============================================================================
-
-const CURRENT_SESSION_FILE = path.join(STATE_DIR, 'current-session.json');
-const ROUTING_CONFIG_FILE = path.join(HOME_DIR, '.memesh', 'routing-config.json');
-const ROUTING_AUDIT_LOG = path.join(HOME_DIR, '.memesh', 'routing-audit.log');
-const PLANNING_TEMPLATE_FILE = path.join(
- path.dirname(fileURLToPath(import.meta.url)),
- 'templates',
- 'planning-template.md'
-);
-
-// ============================================================================
-// Response Merger
-// ============================================================================
-
-/**
- * Deep-merge two objects (shallow for top-level, recursive for nested).
- * Later values override earlier ones.
- */
-function deepMerge(target, source) {
- if (!source) return target;
- if (!target) return source;
-
- const result = { ...target };
- for (const key of Object.keys(source)) {
- if (
- typeof result[key] === 'object' && result[key] !== null &&
- typeof source[key] === 'object' && source[key] !== null &&
- !Array.isArray(result[key])
- ) {
- result[key] = deepMerge(result[key], source[key]);
- } else {
- result[key] = source[key];
- }
- }
- return result;
-}
-
-/**
- * Get the most restrictive permission decision.
- * Priority: deny > ask > allow > undefined
- */
-function mostRestrictive(decisions) {
- const priority = { deny: 3, ask: 2, allow: 1 };
- let result = undefined;
- let maxPriority = 0;
-
- for (const d of decisions) {
- if (d && priority[d] > maxPriority) {
- maxPriority = priority[d];
- result = d;
- }
- }
- return result;
-}
-
-/**
- * Merge multiple handler responses into a single hook output.
- * @param {Array} responses - Handler responses
- * @returns {Object|null} Merged response or null if all handlers returned null
- */
-function mergeResponses(responses) {
- const valid = responses.filter(Boolean);
- if (valid.length === 0) return null;
-
- let mergedInput = undefined;
- const contextParts = [];
- const decisions = [];
-
- for (const r of valid) {
- if (r.updatedInput) {
- mergedInput = deepMerge(mergedInput, r.updatedInput);
- }
- if (r.additionalContext) {
- contextParts.push(r.additionalContext);
- }
- if (r.permissionDecision) {
- decisions.push(r.permissionDecision);
- }
- }
-
- const merged = {};
- if (mergedInput) merged.updatedInput = mergedInput;
- if (contextParts.length > 0) merged.additionalContext = contextParts.join('\n\n');
-
- const decision = mostRestrictive(decisions);
- if (decision) merged.permissionDecision = decision;
-
- return Object.keys(merged).length > 0 ? merged : null;
-}
-
-// ============================================================================
-// Routing Config
-// ============================================================================
-
-/** Module-level cache for routing config (30-second TTL) */
-let _routingConfigCache = null;
-let _routingConfigTimestamp = 0;
-const ROUTING_CONFIG_TTL_MS = 30_000;
-
-/**
- * Load routing config with fallback defaults.
- * Creates default config if file doesn't exist.
- * Results are cached for 30 seconds to avoid repeated file I/O.
- */
-function loadRoutingConfig() {
- const now = Date.now();
- if (_routingConfigCache && (now - _routingConfigTimestamp) < ROUTING_CONFIG_TTL_MS) {
- return _routingConfigCache;
- }
- const defaults = {
- version: 1,
- modelRouting: {
- rules: [
- { subagentType: 'Explore', model: 'haiku', reason: 'Fast codebase search' },
- ],
- default: null,
- },
- backgroundRules: [
- { subagentType: 'Explore', forceBackground: false },
- ],
- planningEnforcement: {
- enabled: true,
- triggerSubagents: ['Plan'],
- triggerEnterPlanMode: true,
- },
- dryRunGate: {
- enabled: true,
- skipSubagents: ['Explore', 'Plan', 'claude-code-guide'],
- },
- auditLog: true,
- };
-
- try {
- if (fs.existsSync(ROUTING_CONFIG_FILE)) {
- const config = JSON.parse(fs.readFileSync(ROUTING_CONFIG_FILE, 'utf-8'));
- const merged = { ...defaults, ...config };
- _routingConfigCache = merged;
- _routingConfigTimestamp = now;
- return merged;
- }
- } catch (error) {
- logError('loadRoutingConfig', error);
- }
-
- // Create default config on first run
- try {
- const dir = path.dirname(ROUTING_CONFIG_FILE);
- if (!fs.existsSync(dir)) {
- fs.mkdirSync(dir, { recursive: true });
- }
- fs.writeFileSync(ROUTING_CONFIG_FILE, JSON.stringify(defaults, null, 2), 'utf-8');
- } catch {
- // Non-critical — works with in-memory defaults
- }
-
- _routingConfigCache = defaults;
- _routingConfigTimestamp = now;
- return defaults;
-}
-
-// ============================================================================
-// Audit Log
-// ============================================================================
-
-/**
- * Append an entry to the routing audit log.
- * @param {string} entry - Log entry
- * @param {Object} config - Routing config
- */
-const AUDIT_LOG_MAX_BYTES = 1_048_576; // 1 MB
-const AUDIT_LOG_KEEP_LINES = 500;
-
-function auditLog(entry, config) {
- if (!config.auditLog) return;
-
- try {
- // Rotate if log exceeds 1 MB: keep only the last 500 lines
- if (fs.existsSync(ROUTING_AUDIT_LOG)) {
- const stat = fs.statSync(ROUTING_AUDIT_LOG);
- if (stat.size > AUDIT_LOG_MAX_BYTES) {
- const content = fs.readFileSync(ROUTING_AUDIT_LOG, 'utf-8');
- const lines = content.split('\n');
- const truncated = lines.slice(-AUDIT_LOG_KEEP_LINES).join('\n');
- fs.writeFileSync(ROUTING_AUDIT_LOG, truncated);
- }
- }
-
- const timestamp = new Date().toISOString();
- const line = `[${timestamp}] ${entry}\n`;
- fs.appendFileSync(ROUTING_AUDIT_LOG, line);
- } catch {
- // Non-critical
- }
-}
-
-// ============================================================================
-// Handler 1: Code Review (existing behavior)
-// ============================================================================
-
-function codeReviewHandler(toolName, toolInput, _session) {
- // Only applies to git commit commands
- if (toolName !== 'Bash') return null;
-
- const cmd = toolInput?.command || '';
- if (!/git\s+commit\s/.test(cmd) || cmd.includes('--amend')) return null;
-
- // Check if code review was done this session
- const session = readJSONFile(CURRENT_SESSION_FILE, {});
- if (session.codeReviewDone === true) return null;
-
- return {
- additionalContext: [
- '',
- 'PRE-COMMIT REVIEW REMINDER:',
- 'No comprehensive code review was detected in this session.',
- 'Before committing significant changes, run: @comprehensive-code-review',
- '',
- 'The review checks for:',
- '- Ripple Map: unsynchronized cross-file changes',
- '- Reality Check: phantom imports, ghost methods, schema drift',
- '- Cross-boundary Sync: type parity, client parity, route-SDK match',
- '- Security, concurrency, error handling, and 7 other dimensions',
- '',
- 'Skip only for trivial changes (typo fixes, formatting, comments).',
- ' ',
- ].join('\n'),
- };
-}
-
-// ============================================================================
-// Handler 2: Model Routing
-// ============================================================================
-
-function routingHandler(toolName, toolInput, _session, config) {
- if (toolName !== 'Task') return null;
-
- const subagentType = toolInput?.subagent_type || '';
- if (!subagentType) return null;
-
- const result = { updatedInput: {} };
- let applied = false;
-
- // Model routing
- const modelRules = config.modelRouting?.rules || [];
- for (const rule of modelRules) {
- if (!rule.subagentType) continue;
- if (subagentType.toLowerCase() === rule.subagentType.toLowerCase()) {
- // Never override user's explicit model choice
- if (toolInput.model) {
- auditLog(`Task(${subagentType}) → user override preserved (model: ${toolInput.model})`, config);
- break;
- }
- result.updatedInput.model = rule.model;
- auditLog(`Task(${subagentType}) → model: ${rule.model} (${rule.reason})`, config);
- applied = true;
- break;
- }
- }
-
- // Background routing
- const bgRules = config.backgroundRules || [];
- for (const rule of bgRules) {
- if (!rule.subagentType) continue;
- if (subagentType.toLowerCase() === rule.subagentType.toLowerCase()) {
- // Only force background if not explicitly set by user/Claude
- if (rule.forceBackground && toolInput.run_in_background === undefined) {
- result.updatedInput.run_in_background = true;
- auditLog(`Task(${subagentType}) → background: true`, config);
- applied = true;
- }
- break;
- }
- }
-
- if (!applied && !toolInput.model) {
- auditLog(`Task(${subagentType}) → no override (no matching rule)`, config);
- }
-
- return Object.keys(result.updatedInput).length > 0 ? result : null;
-}
-
-// ============================================================================
-// Handler 3: Planning Enforcer
-// ============================================================================
-
-function planningHandler(toolName, toolInput, _session, config) {
- const planConfig = config.planningEnforcement;
- if (!planConfig?.enabled) return null;
-
- // Case 1: Task tool dispatching a Plan subagent
- if (toolName === 'Task') {
- const subagentType = toolInput?.subagent_type || '';
- const triggerSubagents = planConfig.triggerSubagents || ['Plan'];
-
- if (triggerSubagents.some(t => subagentType.toLowerCase() === t.toLowerCase())) {
- const template = loadPlanningTemplate();
- if (!template) return null;
-
- auditLog(`Task(${subagentType}) → planning template injected`, config);
-
- // Append template to the subagent's prompt via updatedInput.prompt
- const originalPrompt = toolInput?.prompt || '';
- return {
- updatedInput: {
- prompt: originalPrompt + '\n\n---\n\n' + template,
- },
- };
- }
- }
-
- // Case 2: EnterPlanMode — inject into main Claude's context
- if (toolName === 'EnterPlanMode' && planConfig.triggerEnterPlanMode) {
- auditLog('EnterPlanMode → planning template context injected', config);
-
- return {
- additionalContext: [
- 'PLANNING MODE ACTIVATED — Use this template for your plan:',
- '',
- loadPlanningTemplate() || '(Planning template not found)',
- '',
- 'IMPORTANT: Present the completed plan to the user and wait for',
- 'explicit approval before proceeding to implementation.',
- ].join('\n'),
- };
- }
-
- return null;
-}
-
-/**
- * Load the planning template from file.
- * @returns {string|null}
- */
-function loadPlanningTemplate() {
- try {
- if (fs.existsSync(PLANNING_TEMPLATE_FILE)) {
- return fs.readFileSync(PLANNING_TEMPLATE_FILE, 'utf-8');
- }
- } catch (error) {
- logError('loadPlanningTemplate', error);
- }
- return null;
-}
-
-// ============================================================================
-// Handler 4: Dry-Run Gate
-// ============================================================================
-
-function dryRunGateHandler(toolName, toolInput, _session, config) {
- const gateConfig = config.dryRunGate;
- if (!gateConfig?.enabled) return null;
-
- // Only applies to Task dispatches (heavy operations)
- if (toolName !== 'Task') return null;
-
- const subagentType = toolInput?.subagent_type || '';
- const skipTypes = gateConfig.skipSubagents || ['Explore', 'Plan', 'claude-code-guide'];
-
- // Skip for research/planning agents that don't need tested code
- if (skipTypes.some(t => subagentType.toLowerCase().includes(t.toLowerCase()))) {
- return null;
- }
-
- // Read session state for file tracking
- const session = readJSONFile(CURRENT_SESSION_FILE, {});
- const modifiedFiles = session.modifiedFiles || [];
- const testedFiles = session.testedFiles || [];
-
- if (modifiedFiles.length === 0) return null;
-
- // Find untested files
- const untestedFiles = modifiedFiles.filter(f => !testedFiles.includes(f));
-
- if (untestedFiles.length === 0) return null;
-
- // Build warning (advisory only — never deny)
- const fileList = untestedFiles.length <= 5
- ? untestedFiles.map(f => path.basename(f)).join(', ')
- : `${untestedFiles.slice(0, 5).map(f => path.basename(f)).join(', ')} (+${untestedFiles.length - 5} more)`;
-
- auditLog(`Task(${subagentType}) → dry-run warning: ${untestedFiles.length} untested files`, config);
-
- return {
- additionalContext: [
- 'UNTESTED CODE WARNING:',
- `${untestedFiles.length} modified file(s) have not been tested yet: ${fileList}`,
- '',
- 'Consider running tests before dispatching this task:',
- '- node --check (syntax verification)',
- '- vitest run (unit tests)',
- '- tsc --noEmit (type checking)',
- '',
- 'This is advisory — proceed if you are confident the code is correct.',
- ].join('\n'),
- };
-}
-
-// ============================================================================
-// Hook Response Output
-// ============================================================================
-
-/**
- * Output hook response as JSON to stdout.
- */
-function respond(hookOutput) {
- process.stdout.write(JSON.stringify({
- hookSpecificOutput: {
- hookEventName: 'PreToolUse',
- ...hookOutput,
- },
- }));
-}
-
-// ============================================================================
-// Main
-// ============================================================================
-
-async function preToolUse() {
- try {
- const input = await readStdin(3000);
- if (!input || input.trim() === '') {
- process.exit(0);
- }
-
- const data = JSON.parse(input);
- const toolName = data.tool_name || data.toolName || '';
- const toolInput = data.tool_input || data.arguments || {};
-
- // Load config once for all handlers
- const config = loadRoutingConfig();
-
- // Load session state once for handlers that need it
- const session = readJSONFile(CURRENT_SESSION_FILE, {});
-
- // Run all handlers
- const responses = [
- codeReviewHandler(toolName, toolInput, session),
- routingHandler(toolName, toolInput, session, config),
- planningHandler(toolName, toolInput, session, config),
- dryRunGateHandler(toolName, toolInput, session, config),
- ];
-
- // Merge all responses
- const merged = mergeResponses(responses);
-
- // If any handler produced output, send the merged response
- if (merged) {
- respond(merged);
- }
-
- process.exit(0);
- } catch (error) {
- logError('PreToolUse', error);
- process.exit(0); // Never block on hook errors
- }
-}
-
-preToolUse();
diff --git a/scripts/hooks/session-start-recall-utils.js b/scripts/hooks/session-start-recall-utils.js
deleted file mode 100644
index b1ee85dd..00000000
--- a/scripts/hooks/session-start-recall-utils.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Session-Start Recall Utilities
- *
- * Utility functions for proactive memory recall during session start.
- * Builds search queries from project context and formats recall results.
- */
-
-/**
- * Build FTS5 search query from project name and recent commits.
- * Strips conventional commit prefixes (fix:, feat(scope):, etc.)
- * @param {string} projectName
- * @param {string[]} recentCommits
- * @returns {string}
- */
-export function buildSessionRecallQuery(projectName, recentCommits = []) {
- const parts = [projectName];
- if (recentCommits.length > 0) {
- const cleaned = recentCommits
- .map(msg => msg.replace(/^(fix|feat|chore|refactor|perf|docs|test|style|ci)\(?[^)]*\)?:\s*/i, '').trim())
- .filter(msg => msg.length > 0);
- parts.push(...cleaned);
- }
- return parts.join(' ').trim();
-}
-
-/**
- * Format recall results for hook output display.
- * Shows max 2 observations per entity.
- * @param {Array<{name: string, observations: string[], similarity: number}>} results
- * @returns {string} Formatted output (empty string if no results)
- */
-export function formatRecallOutput(results) {
- if (!results || results.length === 0) return '';
- const lines = results.map(r => {
- const pct = Math.round(r.similarity * 100);
- const obs = r.observations.slice(0, 2).join('; ');
- return ` - ${r.name} (${pct}%): ${obs}`;
- });
- return lines.join('\n');
-}
diff --git a/scripts/hooks/session-start.js b/scripts/hooks/session-start.js
index 72ddd2e1..b9f07f95 100755
--- a/scripts/hooks/session-start.js
+++ b/scripts/hooks/session-start.js
@@ -1,647 +1,102 @@
#!/usr/bin/env node
-/**
- * SessionStart Hook - Claude Code Event-Driven Hooks
- *
- * Triggered at the start of each Claude Code session.
- *
- * Features:
- * - Checks MeMesh MCP server availability
- * - Auto-recalls last session key points from MeMesh
- * - Reads recommendations from last session
- * - Displays suggested skills to load
- * - Shows warnings (quota, slow tools, etc.)
- * - Initializes current session state
- */
+import { createRequire } from 'module';
+import { homedir } from 'os';
+import { join, basename } from 'path';
+import { existsSync } from 'fs';
-import {
- HOME_DIR,
- STATE_DIR,
- MEMESH_DB_PATH,
- THRESHOLDS,
- readJSONFile,
- writeJSONFile,
- sqliteQueryJSON,
- getTimeAgo,
- logError,
- queryActivePlans,
- renderTimelineCompact,
-} from './hook-utils.js';
-import { buildSessionRecallQuery, formatRecallOutput } from './session-start-recall-utils.js';
-import fs from 'fs';
-import path from 'path';
-import { execFileSync } from 'child_process';
+const require = createRequire(import.meta.url);
-// ============================================================================
-// File Paths
-// ============================================================================
-
-const CCB_HEARTBEAT_FILE = path.join(STATE_DIR, 'ccb-heartbeat.json');
-const MCP_SETTINGS_FILE = path.join(HOME_DIR, '.claude', 'mcp_settings.json');
-const RECOMMENDATIONS_FILE = path.join(STATE_DIR, 'recommendations.json');
-const SESSION_CONTEXT_FILE = path.join(STATE_DIR, 'session-context.json');
-const CURRENT_SESSION_FILE = path.join(STATE_DIR, 'current-session.json');
-const LAST_SESSION_CACHE_FILE = path.join(STATE_DIR, 'last-session-summary.json');
-
-/** Maximum cache age: 7 days */
-const CACHE_MAX_AGE_MS = 7 * 24 * 60 * 60 * 1000;
-
-// ============================================================================
-// MeMesh Status Check
-// ============================================================================
-
-/**
- * Check MeMesh MCP Server availability
- * @returns {{ configured: boolean, running: boolean, lastHeartbeat: string|null, serverPath: string|null }}
- */
-function checkCCBAvailability() {
- const result = {
- configured: false,
- running: false,
- lastHeartbeat: null,
- serverPath: null,
- };
-
- // Check if MeMesh is configured in MCP settings
- try {
- if (fs.existsSync(MCP_SETTINGS_FILE)) {
- const mcpSettings = JSON.parse(fs.readFileSync(MCP_SETTINGS_FILE, 'utf-8'));
-
- // Check for MeMesh and legacy names (backward compatibility)
- const ccbNames = [
- 'memesh',
- '@pcircle/memesh',
- '@pcircle/claude-code-buddy-mcp',
- 'claude-code-buddy',
- 'ccb',
- ];
-
- for (const name of ccbNames) {
- if (mcpSettings.mcpServers && mcpSettings.mcpServers[name]) {
- result.configured = true;
- result.serverPath = mcpSettings.mcpServers[name].args?.[0] || 'configured';
- break;
- }
- }
- }
- } catch (err) {
- logError('checkCCBAvailability:mcp_settings', err);
- }
-
- // Check heartbeat file (MeMesh writes this when running)
- try {
- if (fs.existsSync(CCB_HEARTBEAT_FILE)) {
- const heartbeat = JSON.parse(fs.readFileSync(CCB_HEARTBEAT_FILE, 'utf-8'));
- result.lastHeartbeat = heartbeat.timestamp;
-
- const heartbeatTime = new Date(heartbeat.timestamp).getTime();
- const now = Date.now();
-
- if (now - heartbeatTime < THRESHOLDS.HEARTBEAT_VALIDITY) {
- result.running = true;
- }
- }
- } catch (err) {
- logError('checkCCBAvailability:heartbeat', err);
- }
-
- return result;
-}
-
-/**
- * Display MeMesh status and reminder
- */
-function displayCCBStatus(ccbStatus) {
- console.log('═'.repeat(60));
- console.log(' 🤖 MeMesh Status');
- console.log('═'.repeat(60));
-
- if (!ccbStatus.configured) {
- console.log('');
- console.log(' ⚠️ MeMesh MCP Server is NOT configured!');
- console.log('');
- console.log(' MeMesh provides memory management and knowledge graph tools.');
- console.log(' To configure MeMesh, add it to ~/.claude/mcp_settings.json');
- console.log('');
- console.log(' Available MeMesh tools when connected:');
- console.log(' • buddy-remember: Query past knowledge');
- console.log(' • buddy-do: Execute common operations');
- console.log(' • memesh-create-entities: Store new knowledge to graph');
- console.log('');
- } else if (!ccbStatus.running) {
- console.log('');
- console.log(' ℹ️ MeMesh is configured but status unknown');
- console.log(` Path: ${ccbStatus.serverPath}`);
- console.log('');
- console.log(' 📝 REMINDER: Use MeMesh tools for memory management:');
- console.log('');
- console.log(' Before starting work:');
- console.log(' buddy-remember "relevant topic" - Query past experiences');
- console.log('');
- console.log(' After completing work:');
- console.log(' memesh-create-entities - Store new learnings');
- console.log(' memesh-record-mistake - Record errors for future reference');
- console.log('');
- console.log(' 💡 If MeMesh tools fail, check MCP server status.');
- console.log('');
- } else {
- console.log('');
- console.log(' ✅ MeMesh MCP Server is running');
- console.log(` Last heartbeat: ${ccbStatus.lastHeartbeat}`);
- console.log('');
- console.log(' 📋 Session Start Checklist:');
- console.log(' ☐ buddy-remember - Query relevant past knowledge');
- console.log('');
- console.log(' 📋 Session End Checklist:');
- console.log(' ☐ memesh-create-entities - Store new learnings');
- console.log(' ☐ memesh-record-mistake - Record any errors');
- console.log('');
- }
-
- console.log('═'.repeat(60));
- console.log('');
-}
-
-// ============================================================================
-// Memory Recall
-// ============================================================================
-
-/**
- * Try to read session summary from cache file (fast path).
- * Cache is written by stop.js on session end.
- * @returns {{ entityName: string, createdAt: string, metadata: object, keyPoints: string[] } | null}
- */
-function recallFromCache() {
+let input = '';
+process.stdin.setEncoding('utf8');
+process.stdin.on('data', (chunk) => { input += chunk; });
+process.stdin.on('end', () => {
try {
- if (!fs.existsSync(LAST_SESSION_CACHE_FILE)) {
- return null;
- }
-
- const cache = readJSONFile(LAST_SESSION_CACHE_FILE, null);
- if (!cache || !cache.savedAt || !cache.keyPoints) {
- return null;
- }
-
- // Check staleness
- const cacheAge = Date.now() - new Date(cache.savedAt).getTime();
- if (cacheAge > CACHE_MAX_AGE_MS) {
- // Stale cache — delete it
- try { fs.unlinkSync(LAST_SESSION_CACHE_FILE); } catch { /* ignore */ }
- return null;
- }
-
- return {
- entityName: 'session_cache',
- createdAt: cache.savedAt,
- metadata: {
- duration: cache.duration,
- toolCount: cache.toolCount,
- },
- keyPoints: cache.keyPoints,
- };
- } catch (error) {
- logError('recallFromCache', error);
- return null;
- }
-}
-
-/**
- * Recall recent session key points from MeMesh (slow path — SQLite query).
- * Used as fallback when cache is not available.
- * @returns {{ entityName: string, createdAt: string, metadata: object, keyPoints: string[] } | null}
- */
-function recallFromSQLite() {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) {
- return null;
- }
-
- const cutoffDate = new Date();
- cutoffDate.setDate(cutoffDate.getDate() - THRESHOLDS.RECALL_DAYS);
- const cutoffISO = cutoffDate.toISOString();
-
- const query = `
- SELECT id, name, metadata, created_at
- FROM entities
- WHERE type = ? AND created_at > ?
- ORDER BY created_at DESC
- LIMIT 1
- `.replace(/\n/g, ' ');
-
- // Use JSON mode to avoid pipe-split issues with | in metadata
- const entityRows = sqliteQueryJSON(
- MEMESH_DB_PATH,
- query,
- ['session_keypoint', cutoffISO]
- );
-
- if (!entityRows || entityRows.length === 0) {
- return null;
- }
-
- const row = entityRows[0];
- const entityId = row.id;
- const entityName = row.name;
- const createdAt = row.created_at;
-
- // Observations also use JSON mode for safety
- const obsRows = sqliteQueryJSON(
- MEMESH_DB_PATH,
- 'SELECT content FROM observations WHERE entity_id = ? ORDER BY created_at ASC',
- [entityId]
- );
-
- const keyPoints = (obsRows || []).map(r => r.content).filter(Boolean);
-
- let parsedMetadata = {};
- try {
- parsedMetadata = JSON.parse(row.metadata || '{}');
- } catch (parseErr) {
- logError('recallFromSQLite:metadata-parse', parseErr);
- }
-
- return {
- entityName,
- createdAt,
- metadata: parsedMetadata,
- keyPoints,
- };
- } catch (error) {
- logError('recallFromSQLite', error);
- return null;
- }
-}
-
-/**
- * Recall recent session key points — cache-first, SQLite fallback.
- * @returns {{ entityName: string, createdAt: string, metadata: object, keyPoints: string[] } | null}
- */
-function recallRecentKeyPoints() {
- // Fast path: read from cache file (no sqlite3 spawn)
- const cached = recallFromCache();
- if (cached) {
- return cached;
- }
-
- // Slow path: query SQLite
- return recallFromSQLite();
-}
-
-/**
- * Display recalled key points from last session
- */
-function displayRecalledMemory(recalledData) {
- console.log('═'.repeat(60));
- console.log(' 🧠 MeMesh Memory Recall');
- console.log('═'.repeat(60));
-
- if (!recalledData || !recalledData.keyPoints || recalledData.keyPoints.length === 0) {
- console.log('');
- console.log(' ℹ️ No recent memories found (last 30 days)');
- console.log(' 💡 Memories will be auto-saved when this session ends');
- console.log('');
- console.log('═'.repeat(60));
- console.log('');
- return;
- }
-
- console.log('');
-
- // Display timestamp
- const savedTime = new Date(recalledData.createdAt);
- const timeAgo = getTimeAgo(savedTime);
- console.log(` 🕐 Saved: ${timeAgo}`);
-
- // Display metadata if available
- if (recalledData.metadata) {
- const meta = recalledData.metadata;
- if (meta.duration) {
- console.log(` ⏱️ Last session duration: ${meta.duration}`);
- }
- if (meta.toolCount) {
- console.log(` 🛠️ Tools used: ${meta.toolCount}`);
- }
- }
-
- console.log('');
- console.log(' 📋 Key Points:');
-
- // Display key points with formatting
- recalledData.keyPoints.forEach(point => {
- if (point.startsWith('[SESSION]')) {
- console.log(` 📊 ${point.replace('[SESSION] ', '')}`);
- } else if (point.startsWith('[WORK]')) {
- console.log(` 📁 ${point.replace('[WORK] ', '')}`);
- } else if (point.startsWith('[COMMIT]')) {
- console.log(` ✅ ${point.replace('[COMMIT] ', '')}`);
- } else if (point.startsWith('[ISSUE]') || point.startsWith('[PROBLEM]')) {
- console.log(` ⚠️ ${point.replace(/\[(ISSUE|PROBLEM)\] /, '')}`);
- } else if (point.startsWith('[LEARN]')) {
- console.log(` 💡 ${point.replace('[LEARN] ', '')}`);
- } else if (point.startsWith('[TASK]')) {
- console.log(` 📝 ${point.replace('[TASK] ', '')}`);
- } else if (point.startsWith('[DECISION]')) {
- console.log(` 🎯 ${point.replace('[DECISION] ', '')}`);
- } else if (point.startsWith('[PATTERN]')) {
- console.log(` 🔄 ${point.replace('[PATTERN] ', '')}`);
- } else if (point.startsWith('[SCOPE]') || point.startsWith('[FOCUS]')) {
- console.log(` 🎯 ${point.replace(/\[(SCOPE|FOCUS)\] /, '')}`);
- } else if (point.startsWith('[NOTE]')) {
- console.log(` 📌 ${point.replace('[NOTE] ', '')}`);
- } else {
- console.log(` • ${point}`);
+ const data = JSON.parse(input);
+ const projectName = basename(data.cwd || process.cwd());
+
+ // Find database
+ const dbPath = process.env.MEMESH_DB_PATH || join(homedir(), '.memesh', 'knowledge-graph.db');
+ if (!existsSync(dbPath)) {
+ output('MeMesh: No database found. Memories will be created as you work.');
+ return;
}
- });
-
- console.log('');
- console.log('═'.repeat(60));
- console.log('');
-}
-// ============================================================================
-// CLAUDE.md Reload
-// ============================================================================
-
-/**
- * Find and display project CLAUDE.md content on session start.
- * This ensures instructions are fresh in context even after compaction.
- * Searches: CWD/.claude/CLAUDE.md, CWD/CLAUDE.md
- */
-function reloadClaudeMd() {
- const cwd = process.cwd();
- const candidates = [
- path.join(cwd, '.claude', 'CLAUDE.md'),
- path.join(cwd, 'CLAUDE.md'),
- ];
-
- for (const candidate of candidates) {
+ const Database = require('better-sqlite3');
+ const db = new Database(dbPath, { readonly: true });
try {
- if (fs.existsSync(candidate)) {
- const content = fs.readFileSync(candidate, 'utf-8');
- const lineCount = content.split('\n').length;
- const relativePath = path.relative(cwd, candidate);
-
- console.log('═'.repeat(60));
- console.log(' 📋 CLAUDE.md Reloaded');
- console.log('═'.repeat(60));
- console.log('');
- console.log(` Source: ${relativePath} (${lineCount} lines)`);
- console.log('');
- console.log(content);
- console.log('');
- console.log('═'.repeat(60));
- console.log('');
+ db.pragma('journal_mode = WAL');
+
+ // Check if tables exist (db may exist but be empty)
+ const tableCheck = db.prepare(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name='entities'"
+ ).get();
+ if (!tableCheck) {
+ output('MeMesh: Database exists but no memories stored yet.');
return;
}
- } catch (readErr) {
- logError(`reloadClaudeMd:${candidate}`, readErr);
- }
- }
-}
-
-// ============================================================================
-// Main Session Start Logic
-// ============================================================================
-
-// ============================================================================
-// Active Plans Display (Beta)
-// ============================================================================
-
-/**
- * Display active plans with compact timeline.
- * Also warns about stale plans (no progress for 7+ days).
- */
-function displayActivePlans() {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) return;
-
- const activePlans = queryActivePlans(MEMESH_DB_PATH);
- if (activePlans.length === 0) return;
-
- console.log('═'.repeat(60));
- console.log(' 📋 Active Plans');
- console.log('═'.repeat(60));
- console.log('');
- for (const plan of activePlans) {
- console.log(renderTimelineCompact(plan));
-
- // Stale plan warning (7+ days since last progress)
- const lastStep = plan.metadata.stepsDetail
- ?.filter(s => s.completed && s.date)
- .sort((a, b) => b.date.localeCompare(a.date))[0];
- if (lastStep && lastStep.date) {
- const daysSince = Math.floor((Date.now() - new Date(lastStep.date).getTime()) / (1000 * 60 * 60 * 24));
- if (daysSince >= 7) {
- console.log(` ⚠️ No progress for ${daysSince} days`);
+ // Query project-specific recent entities with their observations
+ const projectTag = `project:${projectName}`;
+ const projectEntities = db.prepare(`
+ SELECT DISTINCT e.id, e.name, e.type, e.created_at
+ FROM entities e
+ JOIN tags t ON t.entity_id = e.id
+ WHERE t.tag = ?
+ ORDER BY e.id DESC
+ LIMIT 10
+ `).all(projectTag);
+
+ // Fetch observations for each entity (up to 3 per entity)
+ const getObservations = db.prepare(
+ 'SELECT content FROM observations WHERE entity_id = ? ORDER BY id DESC LIMIT 3'
+ );
+
+ // Query global recent entities
+ const recentEntities = db.prepare(`
+ SELECT id, name, type, created_at
+ FROM entities
+ ORDER BY id DESC
+ LIMIT 5
+ `).all();
+
+ // Build recall message
+ const lines = [];
+ if (projectEntities.length > 0) {
+ lines.push(`Project "${projectName}" memories (${projectEntities.length}):`);
+ for (const e of projectEntities) {
+ lines.push(` - [${e.type}] ${e.name}`);
+ const obs = getObservations.all(e.id);
+ for (const o of obs) {
+ lines.push(` ${o.content}`);
+ }
}
}
- console.log('');
- }
-
- console.log('═'.repeat(60));
- console.log('');
- } catch (error) {
- logError('displayActivePlans', error);
- }
-}
-
-/**
- * Proactive memory recall — queries KG for memories related to current project.
- * Uses project name + last 3 git commits as search query.
- */
-function recallProactiveMemories() {
- try {
- const projectName = path.basename(process.cwd());
-
- let recentCommits = [];
- try {
- const gitOutput = execFileSync('git', ['log', '--oneline', '-3', '--format=%s'], {
- timeout: 3000,
- encoding: 'utf-8',
- cwd: process.cwd(),
- });
- recentCommits = gitOutput.trim().split('\n').filter(Boolean);
- } catch {
- // Not a git repo or no commits
- }
-
- const query = buildSessionRecallQuery(projectName, recentCommits);
- if (!query) return;
-
- // Build FTS5 tokens
- const ftsTokens = query.split(/\s+/)
- .filter(t => t.length > 2)
- .slice(0, 10)
- .map(t => `"${t.replace(/"/g, '""')}"*`)
- .join(' OR ');
-
- if (!ftsTokens) return;
-
- const sql = `
- SELECT e.name, e.type,
- (SELECT json_group_array(content) FROM observations o WHERE o.entity_id = e.id) as observations_json
- FROM entities e
- JOIN entities_fts ON entities_fts.rowid = e.id
- WHERE entities_fts MATCH ?
- ORDER BY bm25(entities_fts, 10.0, 5.0)
- LIMIT 5
- `;
-
- const result = sqliteQueryJSON(MEMESH_DB_PATH, sql, [ftsTokens]);
- if (!result || result.length === 0) return;
-
- const formatted = result.map(r => ({
- name: r.name,
- observations: JSON.parse(r.observations_json || '[]').filter(Boolean).slice(0, 3),
- similarity: 0.5,
- }));
-
- const output = formatRecallOutput(formatted);
- if (output) {
- console.log('\n Proactive Memory Recall:');
- console.log(output);
- }
- } catch (error) {
- logError('proactive-recall', error);
- }
-}
-
-function sessionStart() {
- console.log('\n🚀 Smart-Agents Session Started\n');
-
- // Reload project CLAUDE.md into context
- reloadClaudeMd();
-
- // Check MeMesh availability
- const ccbStatus = checkCCBAvailability();
- displayCCBStatus(ccbStatus);
-
- // Auto-recall last session's key points from MeMesh
- const recalledMemory = recallRecentKeyPoints();
- displayRecalledMemory(recalledMemory);
-
- // Proactive memory recall (project context + recent commits)
- recallProactiveMemories();
-
- // Display active plans (beta)
- displayActivePlans();
-
- // Read recommendations from last session
- const recommendations = readJSONFile(RECOMMENDATIONS_FILE, {
- recommendedSkills: [],
- detectedPatterns: [],
- warnings: [],
- lastUpdated: null,
- });
-
- // Read session context
- const sessionContext = readJSONFile(SESSION_CONTEXT_FILE, {
- tokenQuota: { used: 0, limit: 200000 },
- learnedPatterns: [],
- lastSessionDate: null,
- });
-
- // Display recommendations
- if (recommendations.recommendedSkills?.length > 0) {
- console.log('📚 Recommended skills based on last session:');
- recommendations.recommendedSkills.forEach(skill => {
- const priority = skill.priority === 'high' ? '🔴' : skill.priority === 'medium' ? '🟡' : '🟢';
- console.log(` ${priority} ${skill.name} - ${skill.reason}`);
- });
- console.log('');
- }
-
- // Display detected patterns
- if (recommendations.detectedPatterns?.length > 0) {
- console.log('✨ Detected patterns:');
- recommendations.detectedPatterns.slice(0, 3).forEach(pattern => {
- console.log(` • ${pattern.description}`);
- if (pattern.suggestion) {
- console.log(` 💡 ${pattern.suggestion}`);
+ if (recentEntities.length > 0) {
+ lines.push('');
+ lines.push('Recent memories:');
+ for (const e of recentEntities) {
+ lines.push(` - [${e.type}] ${e.name}`);
+ const obs = getObservations.all(e.id);
+ for (const o of obs) {
+ lines.push(` ${o.content}`);
+ }
+ }
+ }
+ if (lines.length === 0) {
+ lines.push('MeMesh: No memories found yet. Use remember tool to store knowledge.');
}
- });
- console.log('');
- }
-
- // Display warnings
- if (recommendations.warnings?.length > 0) {
- console.log('⚠️ Warnings:');
- recommendations.warnings.forEach(warning => {
- console.log(` • ${warning}`);
- });
- console.log('');
- }
-
- // Display quota info (guard against division by zero)
- const quotaLimit = sessionContext.tokenQuota?.limit || 1;
- const quotaUsed = sessionContext.tokenQuota?.used || 0;
- const quotaPercentNum = (quotaUsed / quotaLimit) * 100;
- if (quotaPercentNum > 80) {
- console.log(`🔴 Quota usage: ${quotaPercentNum.toFixed(1)}% (please monitor usage)\n`);
- } else if (quotaPercentNum > 50) {
- console.log(`🟡 Quota usage: ${quotaPercentNum.toFixed(1)}%\n`);
- }
-
- // Initialize current session
- const currentSession = {
- startTime: new Date().toISOString(),
- toolCalls: [],
- patterns: [],
- ccbStatus: ccbStatus,
- };
-
- if (writeJSONFile(CURRENT_SESSION_FILE, currentSession)) {
- console.log('✅ Session initialized, ready to work!\n');
- } else {
- console.log('⚠️ Session initialization failed, but you can continue working\n');
- }
-
- // ── User-facing acknowledgment instruction ──
- // Hook stdout is only visible to Claude (system-reminder), not the user.
- // Instruct Claude to briefly acknowledge MeMesh in its first response,
- // so the user knows the plugin is active and helping.
- const sessionCount = recalledMemory?.keyPoints?.length || 0;
- const hasRecommendations = recommendations.recommendedSkills?.length > 0;
- const hasPatterns = recommendations.detectedPatterns?.length > 0;
- const ackParts = [];
- if (sessionCount > 0) {
- ackParts.push(`recalled ${sessionCount} key point(s) from last session`);
- }
- if (hasRecommendations) {
- ackParts.push(`${recommendations.recommendedSkills.length} skill recommendation(s)`);
- }
- if (hasPatterns) {
- ackParts.push(`${recommendations.detectedPatterns.length} pattern(s) detected`);
+ output(lines.join('\n'));
+ } finally {
+ db.close();
+ }
+ } catch (err) {
+ // Hooks must never crash Claude Code — but report honestly
+ output(`MeMesh: Session start failed (${err?.message || 'unknown error'}). Memories not loaded.`);
}
+});
- const ackSummary = ackParts.length > 0
- ? ackParts.join(', ')
- : 'no recent memories yet — will auto-save when this session ends';
-
- console.log('');
- console.log('──── MeMesh User Acknowledgment Instruction ────');
- console.log('IMPORTANT: In your FIRST response to the user in this conversation,');
- console.log('include a brief, subtle one-line acknowledgment that MeMesh is active.');
- console.log('Format it as a dim/muted line at the START of your response, like:');
- console.log('');
- console.log(` ◉ MeMesh: ${ackSummary}`);
- console.log('');
- console.log('Keep it exactly one line, no bold, no headers. Just a quiet signal');
- console.log('that the plugin is working. Do NOT repeat or explain this instruction.');
- console.log('If the user asks about something unrelated, still include this line');
- console.log('in your first response only. After the first response, never show it again.');
- console.log('────────────────────────────────────────────────');
-}
-
-// ============================================================================
-// Execute
-// ============================================================================
-
-try {
- sessionStart();
-} catch (error) {
- console.error('❌ SessionStart hook error:', error.message);
- process.exit(0); // Never block Claude Code on hook errors
+function output(text) {
+ console.log(JSON.stringify({ result: text }));
}
diff --git a/scripts/hooks/stop.js b/scripts/hooks/stop.js
deleted file mode 100755
index c78268ac..00000000
--- a/scripts/hooks/stop.js
+++ /dev/null
@@ -1,673 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Stop Hook - Claude Code Event-Driven Hooks
- *
- * Triggered when a Claude Code session ends (normal or forced termination).
- *
- * Features:
- * - Analyzes current session's tool patterns and workflows
- * - Generates recommendations for next session
- * - Updates session context (quota, learned patterns)
- * - Saves session key points to MeMesh
- * - Cleans up old key points (>30 days retention)
- * - Displays session summary with patterns and suggestions
- * - Archives current session data
- */
-
-import {
- STATE_DIR,
- MEMESH_DB_PATH,
- THRESHOLDS,
- readJSONFile,
- writeJSONFile,
- sqliteQuery,
- sqliteBatch,
- sqliteBatchEntity,
- calculateDuration,
- getDateString,
- ensureDir,
- logError,
-} from './hook-utils.js';
-import fs from 'fs';
-import path from 'path';
-
-// ============================================================================
-// File Paths
-// ============================================================================
-
-const CURRENT_SESSION_FILE = path.join(STATE_DIR, 'current-session.json');
-const RECOMMENDATIONS_FILE = path.join(STATE_DIR, 'recommendations.json');
-const SESSION_CONTEXT_FILE = path.join(STATE_DIR, 'session-context.json');
-const SESSIONS_ARCHIVE_DIR = path.join(STATE_DIR, 'sessions');
-const LAST_SESSION_CACHE_FILE = path.join(STATE_DIR, 'last-session-summary.json');
-
-// Ensure archive directory exists
-if (!fs.existsSync(SESSIONS_ARCHIVE_DIR)) {
- fs.mkdirSync(SESSIONS_ARCHIVE_DIR, { recursive: true });
-}
-
-// ============================================================================
-// Pattern Analysis
-// ============================================================================
-
-/**
- * Analyze tool patterns from session state
- * @param {Object} sessionState - Current session state
- * @returns {Array} Detected patterns
- */
-function analyzeToolPatterns(sessionState) {
- const patterns = [];
-
- if (!sessionState.toolCalls || sessionState.toolCalls.length === 0) {
- return patterns;
- }
-
- // Count tool frequency
- const toolFrequency = {};
- sessionState.toolCalls.forEach(tc => {
- toolFrequency[tc.toolName] = (toolFrequency[tc.toolName] || 0) + 1;
- });
-
- // Find most used tools
- const mostUsedTools = Object.entries(toolFrequency)
- .sort((a, b) => b[1] - a[1])
- .slice(0, 3);
-
- if (mostUsedTools.length > 0) {
- patterns.push({
- type: 'MOST_USED_TOOLS',
- description: mostUsedTools.map(([tool, count]) => `${tool} (${count}x)`).join(', '),
- severity: 'info',
- });
- }
-
- // Check READ_BEFORE_EDIT compliance
- let editWithoutRead = 0;
- let editWithRead = 0;
-
- for (let i = 0; i < sessionState.toolCalls.length; i++) {
- const tool = sessionState.toolCalls[i];
- if (tool.toolName === 'Edit') {
- const recentReads = sessionState.toolCalls
- .slice(Math.max(0, i - 5), i)
- .filter(t => t.toolName === 'Read');
-
- if (recentReads.length > 0) {
- editWithRead++;
- } else {
- editWithoutRead++;
- }
- }
- }
-
- if (editWithRead + editWithoutRead > 0) {
- const compliance = (editWithRead / (editWithRead + editWithoutRead) * 100).toFixed(0);
- patterns.push({
- type: 'READ_BEFORE_EDIT_COMPLIANCE',
- description: `READ_BEFORE_EDIT compliance: ${compliance}%`,
- severity: compliance >= 80 ? 'info' : 'warning',
- suggestion: compliance < 80 ? 'Read files before editing to avoid errors' : 'Good practice!',
- });
- }
-
- // Detect Git workflow
- const gitOps = sessionState.toolCalls.filter(tc =>
- tc.toolName === 'Bash' && ['git add', 'git commit', 'git push', 'git branch'].some(cmd =>
- tc.arguments?.command?.includes(cmd)
- )
- );
-
- if (gitOps.length >= 3) {
- patterns.push({
- type: 'GIT_WORKFLOW',
- description: `Executed ${gitOps.length} Git operations`,
- severity: 'info',
- suggestion: 'Consider loading devops-git-workflows skill next time',
- });
- }
-
- // Detect frontend work
- const frontendOps = sessionState.toolCalls.filter(tc =>
- ['Edit', 'Write', 'Read'].includes(tc.toolName) &&
- ['.tsx', '.jsx', '.vue', '.css'].some(ext => tc.arguments?.file_path?.endsWith(ext))
- );
-
- if (frontendOps.length >= 5) {
- patterns.push({
- type: 'FRONTEND_WORK',
- description: `Modified ${frontendOps.length} frontend files`,
- severity: 'info',
- suggestion: 'Consider loading frontend-design skill next time',
- });
- }
-
- // Detect slow operations
- const slowOps = sessionState.toolCalls.filter(tc => tc.duration && tc.duration > THRESHOLDS.SLOW_EXECUTION);
- if (slowOps.length > 0) {
- patterns.push({
- type: 'SLOW_OPERATIONS',
- description: `${slowOps.length} tools took >5 seconds`,
- severity: 'warning',
- suggestion: 'Consider optimizing these operations or using faster alternatives',
- });
- }
-
- // Detect failures
- const failures = sessionState.toolCalls.filter(tc => tc.success === false);
- if (failures.length > 0) {
- patterns.push({
- type: 'EXECUTION_FAILURES',
- description: `${failures.length} tool executions failed`,
- severity: 'error',
- suggestion: 'Review and fix failure causes',
- });
- }
-
- return patterns;
-}
-
-// ============================================================================
-// Recommendations
-// ============================================================================
-
-/**
- * Save recommendations for next session
- * @param {Array} patterns - Detected patterns
- * @param {Object} sessionState - Current session state
- */
-function saveRecommendations(patterns, sessionState) {
- const recommendations = readJSONFile(RECOMMENDATIONS_FILE, {
- recommendedSkills: [],
- detectedPatterns: [],
- warnings: [],
- lastUpdated: null,
- });
-
- // Add skill recommendations based on patterns
- patterns.forEach(pattern => {
- if (pattern.suggestion && pattern.suggestion.includes('skill')) {
- const skillMatch = pattern.suggestion.match(/loading\s+(.+?)\s+skill/);
- if (skillMatch) {
- const skillName = skillMatch[1];
- const existing = recommendations.recommendedSkills.find(s => s.name === skillName);
-
- if (!existing) {
- recommendations.recommendedSkills.push({
- name: skillName,
- reason: pattern.description,
- priority: pattern.type.includes('GIT') || pattern.type.includes('FRONTEND') ? 'high' : 'medium',
- });
- }
- }
- }
- });
-
- // Keep only top 5 skills
- recommendations.recommendedSkills = recommendations.recommendedSkills.slice(0, 5);
-
- // Merge patterns with existing (keep last 10)
- patterns.forEach(pattern => {
- recommendations.detectedPatterns.unshift({
- description: pattern.description,
- suggestion: pattern.suggestion || '',
- timestamp: new Date().toISOString(),
- });
- });
- recommendations.detectedPatterns = recommendations.detectedPatterns.slice(0, 10);
-
- // Add warnings from error/warning severity patterns
- patterns.filter(p => p.severity === 'warning' || p.severity === 'error').forEach(pattern => {
- if (pattern.suggestion) {
- recommendations.warnings.unshift(pattern.suggestion);
- } else {
- recommendations.warnings.unshift(pattern.description);
- }
- });
- recommendations.warnings = recommendations.warnings.slice(0, 5);
-
- recommendations.lastUpdated = new Date().toISOString();
-
- writeJSONFile(RECOMMENDATIONS_FILE, recommendations);
-}
-
-// ============================================================================
-// Session Context Update
-// ============================================================================
-
-/**
- * Update session context (quota, patterns)
- * @param {Object} sessionState - Current session state
- * @returns {Object} Updated session context
- */
-function updateSessionContext(sessionState) {
- const sessionContext = readJSONFile(SESSION_CONTEXT_FILE, {
- tokenQuota: { used: 0, limit: 200000 },
- learnedPatterns: [],
- lastSessionDate: null,
- });
-
- // Calculate total token usage from session
- let totalTokens = 0;
- if (sessionState.toolCalls) {
- sessionState.toolCalls.forEach(tc => {
- if (tc.tokenUsage) {
- totalTokens += tc.tokenUsage;
- }
- });
- }
-
- // Update quota
- sessionContext.tokenQuota.used = Math.min(
- sessionContext.tokenQuota.used + totalTokens,
- sessionContext.tokenQuota.limit
- );
-
- // Add learned patterns
- if (sessionState.patterns) {
- sessionState.patterns.forEach(pattern => {
- const existing = sessionContext.learnedPatterns.find(p => p.type === pattern.type);
- if (!existing) {
- sessionContext.learnedPatterns.push({
- type: pattern.type,
- count: pattern.count,
- lastSeen: new Date().toISOString(),
- });
- } else {
- existing.count += pattern.count;
- existing.lastSeen = new Date().toISOString();
- }
- });
- }
-
- sessionContext.lastSessionDate = new Date().toISOString();
-
- writeJSONFile(SESSION_CONTEXT_FILE, sessionContext);
-
- return sessionContext;
-}
-
-// ============================================================================
-// MeMesh Memory Save
-// ============================================================================
-
-/**
- * Extract comprehensive key points from session for end summary
- * @param {Object} sessionState - Current session state
- * @param {Array} patterns - Analyzed patterns
- * @returns {Array} Key points
- */
-function extractSessionKeyPoints(sessionState, patterns) {
- const keyPoints = [];
-
- if (!sessionState) {
- return keyPoints;
- }
-
- // 1. Session overview
- const duration = calculateDuration(sessionState.startTime);
- const toolCount = sessionState.toolCalls?.length || 0;
- keyPoints.push(`[SESSION] Duration: ${duration}, Tools used: ${toolCount}`);
-
- // 2. Files modified (task summary)
- if (sessionState.toolCalls) {
- const fileOps = {};
- sessionState.toolCalls.forEach(tc => {
- if (['Edit', 'Write'].includes(tc.toolName) && tc.arguments?.file_path) {
- const filePath = tc.arguments.file_path;
- fileOps[filePath] = (fileOps[filePath] || 0) + 1;
- }
- });
-
- const modifiedFiles = Object.keys(fileOps);
- if (modifiedFiles.length > 0) {
- // Group by directory
- const dirs = {};
- modifiedFiles.forEach(f => {
- const dir = path.dirname(f);
- if (!dirs[dir]) dirs[dir] = [];
- dirs[dir].push(path.basename(f));
- });
-
- const summary = Object.entries(dirs)
- .map(([dir, files]) => {
- const shortDir = dir.split('/').slice(-2).join('/');
- return `${shortDir}: ${files.slice(0, 3).join(', ')}${files.length > 3 ? '...' : ''}`;
- })
- .slice(0, 3)
- .join(' | ');
-
- keyPoints.push(`[WORK] ${modifiedFiles.length} files modified: ${summary}`);
- }
- }
-
- // 3. Git operations (commits = completed work)
- if (sessionState.toolCalls) {
- const gitCommits = sessionState.toolCalls.filter(tc =>
- tc.toolName === 'Bash' && tc.arguments?.command?.includes('git commit')
- );
- if (gitCommits.length > 0) {
- keyPoints.push(`[COMMIT] ${gitCommits.length} commit(s) made`);
- }
- }
-
- // 4. Problems encountered
- if (sessionState.toolCalls) {
- const failures = sessionState.toolCalls.filter(tc => tc.success === false);
- if (failures.length > 0) {
- const failedTools = [...new Set(failures.map(f => f.toolName))];
- keyPoints.push(`[ISSUE] ${failures.length} failures: ${failedTools.join(', ')}`);
- }
- }
-
- // 5. Detected patterns (learnings)
- if (patterns && patterns.length > 0) {
- const significantPatterns = patterns
- .filter(p => p.severity === 'warning' || p.severity === 'error' || p.suggestion)
- .slice(0, 3);
-
- significantPatterns.forEach(p => {
- if (p.suggestion) {
- keyPoints.push(`[LEARN] ${p.description} -> ${p.suggestion}`);
- } else {
- keyPoints.push(`[NOTE] ${p.description}`);
- }
- });
- }
-
- // 6. Most used tools (work focus)
- if (sessionState.toolCalls && sessionState.toolCalls.length > 5) {
- const toolCounts = {};
- sessionState.toolCalls.forEach(tc => {
- toolCounts[tc.toolName] = (toolCounts[tc.toolName] || 0) + 1;
- });
-
- const topTools = Object.entries(toolCounts)
- .sort((a, b) => b[1] - a[1])
- .slice(0, 3)
- .map(([tool, count]) => `${tool}(${count})`)
- .join(', ');
-
- keyPoints.push(`[FOCUS] Top tools: ${topTools}`);
- }
-
- return keyPoints;
-}
-
-/**
- * Save session key points to MeMesh on session end.
- * Uses sqliteBatchEntity for performance (3 spawns instead of N).
- * @param {Object} sessionState - Current session state
- * @param {Array} patterns - Analyzed patterns
- * @returns {boolean} True if saved successfully
- */
-function saveSessionKeyPointsOnEnd(sessionState, patterns) {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) {
- console.log('🧠 MeMesh: Database not found, skipping memory save');
- return false;
- }
-
- const keyPoints = extractSessionKeyPoints(sessionState, patterns);
- if (keyPoints.length === 0) {
- return false;
- }
-
- const entityName = `session_end_${Date.now()}`;
- const startTime = new Date(sessionState.startTime);
- const duration = Math.round((Date.now() - startTime.getTime()) / 1000 / 60);
-
- const metadata = JSON.stringify({
- duration: `${duration}m`,
- toolCount: sessionState.toolCalls?.length || 0,
- saveReason: 'session_end',
- patternCount: patterns.length,
- });
-
- const today = getDateString();
- const tags = ['session_end', 'auto_saved', today];
-
- // Batch: entity + observations + tags in 2 process spawns (was ~10)
- const entityId = sqliteBatchEntity(
- MEMESH_DB_PATH,
- { name: entityName, type: 'session_keypoint', metadata },
- keyPoints,
- tags
- );
-
- if (entityId === null) {
- return false;
- }
-
- console.log(`🧠 MeMesh: Saved ${keyPoints.length} key points to memory`);
- return true;
- } catch (error) {
- console.error(`🧠 MeMesh: Failed to save session key points: ${error.message}`);
- return false;
- }
-}
-
-/**
- * Clean up old key points (older than retention period).
- * Uses batch delete for performance.
- */
-function cleanupOldKeyPoints() {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) {
- return;
- }
-
- const cutoffDate = new Date();
- cutoffDate.setDate(cutoffDate.getDate() - THRESHOLDS.RETENTION_DAYS);
- const cutoffISO = cutoffDate.toISOString();
-
- // Count old entries
- const countResult = sqliteQuery(
- MEMESH_DB_PATH,
- 'SELECT COUNT(*) FROM entities WHERE type = ? AND created_at < ?',
- ['session_keypoint', cutoffISO]
- );
-
- const oldCount = parseInt(countResult, 10) || 0;
-
- if (oldCount > 0) {
- // Batch delete: tags + entities in 2 statements (was N+2 spawns)
- sqliteBatch(MEMESH_DB_PATH, [
- {
- query: `DELETE FROM tags WHERE entity_id IN (
- SELECT id FROM entities WHERE type = ? AND created_at < ?
- )`,
- params: ['session_keypoint', cutoffISO],
- },
- {
- query: 'DELETE FROM entities WHERE type = ? AND created_at < ?',
- params: ['session_keypoint', cutoffISO],
- },
- ]);
-
- console.log(`🧠 MeMesh: Cleaned up ${oldCount} expired memories (>${THRESHOLDS.RETENTION_DAYS} days)`);
- }
- } catch (error) {
- console.error(`🧠 MeMesh: Cleanup failed: ${error.message}`);
- }
-}
-
-// ============================================================================
-// Session Cache (for fast startup)
-// ============================================================================
-
-/**
- * Write session summary cache for fast recall on next startup.
- * Session-start.js reads this instead of querying SQLite.
- * @param {Object} sessionState - Current session state
- * @param {Array} patterns - Analyzed patterns
- */
-function writeSessionCache(sessionState, patterns) {
- try {
- const keyPoints = extractSessionKeyPoints(sessionState, patterns);
- const startTime = new Date(sessionState.startTime);
- const duration = Math.round((Date.now() - startTime.getTime()) / 1000 / 60);
-
- const cache = {
- savedAt: new Date().toISOString(),
- duration: `${duration}m`,
- toolCount: sessionState.toolCalls?.length || 0,
- keyPoints,
- };
-
- writeJSONFile(LAST_SESSION_CACHE_FILE, cache);
- } catch (error) {
- logError('writeSessionCache', error);
- }
-}
-
-// ============================================================================
-// Session Archive
-// ============================================================================
-
-/**
- * Archive current session
- * @param {Object} sessionState - Current session state
- */
-function archiveSession(sessionState) {
- const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
- const archiveFile = path.join(SESSIONS_ARCHIVE_DIR, `session-${timestamp}.json`);
-
- writeJSONFile(archiveFile, sessionState);
-
- // Keep only last N sessions (configurable via THRESHOLDS)
- try {
- // Ensure directory exists before reading
- ensureDir(SESSIONS_ARCHIVE_DIR);
-
- const sessions = fs.readdirSync(SESSIONS_ARCHIVE_DIR)
- .filter(f => f.startsWith('session-'))
- .sort()
- .reverse();
-
- const maxSessions = THRESHOLDS.MAX_ARCHIVED_SESSIONS;
- if (sessions.length > maxSessions) {
- sessions.slice(maxSessions).forEach(f => {
- try {
- fs.unlinkSync(path.join(SESSIONS_ARCHIVE_DIR, f));
- } catch (error) {
- logError('archiveSession.unlink', error);
- }
- });
- }
- } catch (error) {
- logError('archiveSession.readdir', error);
- }
-}
-
-// ============================================================================
-// Display Summary
-// ============================================================================
-
-/**
- * Display session summary
- * @param {Object} sessionState - Current session state
- * @param {Array} patterns - Detected patterns
- * @param {Object} sessionContext - Session context
- */
-function displaySessionSummary(sessionState, patterns, sessionContext) {
- console.log('\n📊 Session Summary\n');
-
- // Duration
- const duration = calculateDuration(sessionState.startTime);
- console.log(`⏱️ Duration: ${duration}`);
-
- // Tool executions
- const totalTools = sessionState.toolCalls?.length || 0;
- const successTools = sessionState.toolCalls?.filter(t => t.success !== false).length || 0;
- const failedTools = totalTools - successTools;
-
- console.log(`🛠️ Tool executions: ${totalTools} (success: ${successTools}, failed: ${failedTools})`);
-
- // Detected patterns
- if (patterns.length > 0) {
- console.log('\n✨ Detected patterns:');
- patterns.slice(0, 5).forEach(pattern => {
- const emoji = pattern.severity === 'error' ? '❌' : pattern.severity === 'warning' ? '⚠️' : '✅';
- console.log(` ${emoji} ${pattern.description}`);
- if (pattern.suggestion) {
- console.log(` 💡 ${pattern.suggestion}`);
- }
- });
- }
-
- // Recommendations for next session
- const recommendations = readJSONFile(RECOMMENDATIONS_FILE, { recommendedSkills: [] });
- if (recommendations.recommendedSkills?.length > 0) {
- console.log('\n💡 Recommended for next session:');
- recommendations.recommendedSkills.slice(0, 3).forEach(skill => {
- console.log(` • ${skill.name} (${skill.reason})`);
- });
- }
-
- // Quota status (guard against division by zero)
- const quotaLimit = sessionContext.tokenQuota?.limit || 1;
- const quotaUsed = sessionContext.tokenQuota?.used || 0;
- const quotaPercentNum = (quotaUsed / quotaLimit) * 100;
- const quotaEmoji = quotaPercentNum > 80 ? '🔴' : quotaPercentNum > 50 ? '🟡' : '🟢';
- console.log(`\n${quotaEmoji} Token quota: ${quotaPercentNum.toFixed(1)}% (${quotaUsed.toLocaleString()} / ${quotaLimit.toLocaleString()})`);
-
- console.log('\n✅ Session state saved\n');
-}
-
-// ============================================================================
-// Main Stop Hook Logic
-// ============================================================================
-
-function stopHook() {
- console.log('\n🛑 Smart-Agents Session Ending...\n');
-
- // Read current session state
- const sessionState = readJSONFile(CURRENT_SESSION_FILE, {
- startTime: new Date().toISOString(),
- toolCalls: [],
- patterns: [],
- });
-
- // Analyze patterns
- const patterns = analyzeToolPatterns(sessionState);
-
- // Save recommendations for next session
- saveRecommendations(patterns, sessionState);
-
- // Update session context
- const sessionContext = updateSessionContext(sessionState);
-
- // Archive session
- archiveSession(sessionState);
-
- // Save session key points to MeMesh
- saveSessionKeyPointsOnEnd(sessionState, patterns);
-
- // Write session cache for fast startup next time (1A.2)
- writeSessionCache(sessionState, patterns);
-
- // Clean up old key points (>30 days)
- cleanupOldKeyPoints();
-
- // Display summary
- displaySessionSummary(sessionState, patterns, sessionContext);
-
- // Clean up current session file
- try {
- fs.unlinkSync(CURRENT_SESSION_FILE);
- } catch {
- // Ignore if file doesn't exist
- }
-}
-
-// ============================================================================
-// Execute
-// ============================================================================
-
-try {
- stopHook();
-} catch (error) {
- console.error('❌ Stop hook error:', error.message);
- process.exit(0); // Never block Claude Code on hook errors
-}
diff --git a/scripts/hooks/subagent-stop.js b/scripts/hooks/subagent-stop.js
deleted file mode 100644
index 9e059dce..00000000
--- a/scripts/hooks/subagent-stop.js
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * SubagentStop Hook - Capture Subagent Results to MeMesh Knowledge Graph
- *
- * Triggered when a subagent finishes execution.
- *
- * Features:
- * - Saves code review findings to MeMesh KG (high-value results)
- * - Tracks code review completion for pre-commit enforcement
- * - Updates session state with subagent activity
- * - Silent operation (no console output)
- */
-
-import {
- STATE_DIR,
- MEMESH_DB_PATH,
- readJSONFile,
- writeJSONFile,
- sqliteBatchEntity,
- getDateString,
- readStdin,
- logError,
- logMemorySave,
-} from './hook-utils.js';
-import fs from 'fs';
-import path from 'path';
-
-// ============================================================================
-// Constants
-// ============================================================================
-
-const CURRENT_SESSION_FILE = path.join(STATE_DIR, 'current-session.json');
-
-/** Agent types that count as code review */
-const CODE_REVIEWER_TYPES = [
- 'code-reviewer',
- 'code-review',
- 'superpowers:code-reviewer',
- 'pr-review-toolkit:code-reviewer',
- 'feature-dev:code-reviewer',
-];
-
-// ============================================================================
-// Code Review Detection
-// ============================================================================
-
-/**
- * Check if this subagent is a code reviewer
- * @param {string} agentType - Subagent type identifier
- * @returns {boolean}
- */
-function isCodeReviewer(agentType) {
- if (!agentType) return false;
- const lower = agentType.toLowerCase();
- return CODE_REVIEWER_TYPES.some(t => lower.includes(t.toLowerCase()));
-}
-
-/**
- * Mark code review as done in session state.
- * This flag is checked by pre-tool-use.js before git commits.
- */
-function markCodeReviewDone() {
- const session = readJSONFile(CURRENT_SESSION_FILE, {});
- session.codeReviewDone = true;
- session.codeReviewTimestamp = new Date().toISOString();
- writeJSONFile(CURRENT_SESSION_FILE, session);
-}
-
-// ============================================================================
-// MeMesh KG Save
-// ============================================================================
-
-/**
- * Save code review results to MeMesh knowledge graph.
- * Only saves code reviewer subagent results (high-value findings).
- *
- * @param {string} agentType - Subagent type
- * @param {string} lastMessage - Agent's final response
- * @returns {boolean} True if saved
- */
-function saveSubagentToKG(agentType, lastMessage) {
- try {
- if (!fs.existsSync(MEMESH_DB_PATH)) return false;
- if (!lastMessage || lastMessage.length < 50) return false;
-
- // Truncate very long messages
- const shortMessage = lastMessage.length > 1000
- ? lastMessage.substring(0, 1000) + '...'
- : lastMessage;
-
- const entityName = `Code Review: ${getDateString()} ${Date.now()} ${agentType}`;
- const metadata = JSON.stringify({
- agentType,
- messageLength: lastMessage.length,
- source: 'subagent-stop-hook',
- });
-
- const tags = [
- 'code-review',
- `agent:${agentType}`,
- `date:${getDateString()}`,
- 'auto-tracked',
- 'scope:project',
- ];
-
- const entityId = sqliteBatchEntity(
- MEMESH_DB_PATH,
- { name: entityName, type: 'code_review', metadata },
- [`[${agentType}] ${shortMessage}`],
- tags
- );
-
- if (entityId === null) return false;
-
- logMemorySave(`Code review saved: ${agentType} (${lastMessage.length} chars)`);
- return true;
- } catch (error) {
- logError('saveSubagentToKG', error);
- return false;
- }
-}
-
-// ============================================================================
-// Session State Update
-// ============================================================================
-
-/**
- * Track subagent activity in session state
- * @param {string} agentType - Subagent type
- */
-function trackSubagentInSession(agentType) {
- const session = readJSONFile(CURRENT_SESSION_FILE, { subagents: [] });
- if (!session.subagents) session.subagents = [];
-
- session.subagents.push({
- type: agentType,
- completedAt: new Date().toISOString(),
- });
-
- // Keep only last 20 subagent entries
- if (session.subagents.length > 20) {
- session.subagents = session.subagents.slice(-20);
- }
-
- writeJSONFile(CURRENT_SESSION_FILE, session);
-}
-
-// ============================================================================
-// Main
-// ============================================================================
-
-async function subagentStop() {
- try {
- const input = await readStdin(3000);
- if (!input || input.trim() === '') {
- process.exit(0);
- }
-
- const data = JSON.parse(input);
- const agentType = data.agent_type || data.agentType || 'unknown';
- const lastMessage = data.last_assistant_message || data.lastAssistantMessage || '';
-
- // Track all subagent completions in session state
- trackSubagentInSession(agentType);
-
- // Track code review completion (for pre-commit enforcement)
- if (isCodeReviewer(agentType)) {
- markCodeReviewDone();
- }
-
- // Save code reviewer results to MeMesh KG (high-value findings)
- if (isCodeReviewer(agentType) && lastMessage.length > 50) {
- saveSubagentToKG(agentType, lastMessage);
- }
-
- process.exit(0);
- } catch (error) {
- logError('SubagentStop', error);
- process.exit(0); // Never block on hook errors
- }
-}
-
-subagentStop();
diff --git a/scripts/install-helpers.js b/scripts/install-helpers.js
deleted file mode 100755
index 7f059ab3..00000000
--- a/scripts/install-helpers.js
+++ /dev/null
@@ -1,278 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * MeMesh Installation Helpers
- *
- * Provides utility functions for:
- * - Creating/updating ~/.claude/mcp_settings.json
- * - Managing MCP server configuration
- * - Verifying installation
- *
- * IMPORTANT: The primary config file is ~/.claude/mcp_settings.json
- * This is what Claude Code's session-start hook checks for.
- */
-
-import fs from 'fs';
-import os from 'os';
-import path from 'path';
-
-// Primary MCP settings file - this is what Claude Code checks
-const MCP_SETTINGS_PATH = path.join(os.homedir(), '.claude', 'mcp_settings.json');
-
-// Legacy paths for backward compatibility checking (reserved for future migration utilities)
-// const _LEGACY_CONFIG_PATHS = [
-// path.join(os.homedir(), '.claude', 'config.json'),
-// path.join(os.homedir(), '.claude.json'),
-// path.join(os.homedir(), '.config', 'claude', 'claude_desktop_config.json'),
-// ];
-
-/**
- * Resolve the config path to use (reserved for future use)
- * Priority: env var > preferred path > MCP_SETTINGS_PATH
- * @param {string} [_preferredPath] - Preferred path (unused, reserved for future use)
- */
-function _resolveConfigPath(_preferredPath) {
- if (_preferredPath) {
- return _preferredPath;
- }
-
- // Check for environment variable (with backward compatibility)
- const envPath = process.env.MEMESH_MCP_CONFIG_PATH || process.env.CCB_MCP_CONFIG_PATH;
- if (envPath) {
- return envPath;
- }
-
- // Always use MCP_SETTINGS_PATH (~/.claude/mcp_settings.json) for Claude Code
- // This is what the session-start hook checks for
- return MCP_SETTINGS_PATH;
-}
-
-/**
- * Create MCP server configuration object (reserved for future use)
- * @param {string} _serverPath - Path to server bootstrap file (unused)
- */
-function _createServerConfig(_serverPath) {
- return {
- command: 'node',
- args: [_serverPath],
- env: {
- NODE_ENV: 'production'
- }
- };
-}
-
-// addToMcpConfig removed - replaced by configureMcpSettings function
-
-/**
- * Configure MeMesh in ~/.claude/mcp_settings.json
- * This is the main function to be called from other scripts
- *
- * @param {Object} options
- * @param {string} options.serverPath - Path to server-bootstrap.js
- * @param {boolean} options.silent - Suppress console output
- * @returns {Object} - { success: boolean, configPath: string, error?: string }
- */
-export function configureMcpSettings(options = {}) {
- const {
- serverPath,
- silent = false
- } = options;
-
- const configPath = MCP_SETTINGS_PATH;
-
- if (!silent) {
- console.log('\n📝 Configuring MCP settings...');
- }
-
- // Validate server path
- if (!serverPath) {
- const error = 'Server path is required';
- if (!silent) console.error(` ❌ ${error}`);
- return { success: false, configPath, error };
- }
-
- // Create config
- let config = { mcpServers: {} };
-
- // Read existing config
- if (fs.existsSync(configPath)) {
- try {
- const content = fs.readFileSync(configPath, 'utf8').trim();
- if (content) {
- config = JSON.parse(content);
- if (!config.mcpServers) config.mcpServers = {};
- }
- } catch (e) {
- if (!silent) {
- console.warn(` ⚠️ Could not parse existing config, creating new one`);
- }
- config = { mcpServers: {} };
- }
- }
-
- // Build server configuration
- const serverConfig = {
- command: 'node',
- args: [serverPath],
- env: {
- NODE_ENV: 'production'
- }
- };
-
- // Update or add memesh entry
- config.mcpServers.memesh = serverConfig;
-
- // Remove legacy entry if exists
- if (config.mcpServers['claude-code-buddy']) {
- delete config.mcpServers['claude-code-buddy'];
- if (!silent) console.log(' ✅ Removed legacy "claude-code-buddy" entry');
- }
-
- // Write config
- try {
- // Use recursive mkdir which handles existing directories safely (avoids TOCTOU race condition)
- const configDir = path.dirname(configPath);
- fs.mkdirSync(configDir, { recursive: true });
-
- fs.writeFileSync(configPath, JSON.stringify(config, null, 2) + '\n', 'utf8');
-
- if (!silent) {
- console.log(` ✅ MCP settings configured at: ${configPath}`);
- console.log(` ✅ Server path: ${serverPath}`);
- }
-
- return { success: true, configPath };
- } catch (error) {
- const errorMsg = `Failed to write config: ${error.message}`;
- if (!silent) console.error(` ❌ ${errorMsg}`);
- return { success: false, configPath, error: errorMsg };
- }
-}
-
-/**
- * Check if MeMesh is configured in mcp_settings.json
- * @returns {Object} - { configured: boolean, serverPath?: string }
- */
-export function checkMcpConfiguration() {
- const configPath = MCP_SETTINGS_PATH;
-
- if (!fs.existsSync(configPath)) {
- return { configured: false };
- }
-
- try {
- const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
- const memeshConfig = config.mcpServers?.memesh || config.mcpServers?.['claude-code-buddy'];
-
- if (memeshConfig) {
- return {
- configured: true,
- serverPath: memeshConfig.args?.[0]
- };
- }
-
- return { configured: false };
- } catch (_err) {
- return { configured: false, error: _err.message };
- }
-}
-
-/**
- * Verify installation files exist
- * @param {string} basePath - Base path to check from
- * @returns {Object} - { valid: boolean, missing: string[] }
- */
-export function verifyInstallation(basePath = process.cwd()) {
- const requiredFiles = [
- 'dist/mcp/server-bootstrap.js',
- 'dist/index.js',
- 'package.json'
- ];
-
- const missing = requiredFiles.filter(file =>
- !fs.existsSync(path.join(basePath, file))
- );
-
- if (missing.length > 0) {
- return { valid: false, missing };
- }
-
- return { valid: true, missing: [] };
-}
-
-/**
- * Get the MCP settings file path
- * @returns {string}
- */
-export function getMcpSettingsPath() {
- return MCP_SETTINGS_PATH;
-}
-
-// ============================================================================
-// Command Line Interface
-// ============================================================================
-const command = process.argv[2];
-const arg = process.argv[3];
-
-switch (command) {
- case 'add-to-mcp':
- case 'configure': {
- // Usage: node install-helpers.js configure
- if (!arg) {
- console.error('❌ Server path required');
- console.error('Usage: node install-helpers.js configure ');
- process.exit(1);
- }
- const result = configureMcpSettings({
- serverPath: arg
- });
- process.exit(result.success ? 0 : 1);
- break;
- }
-
- case 'verify': {
- // Usage: node install-helpers.js verify [base-path]
- const basePath = arg || process.cwd();
- const result = verifyInstallation(basePath);
- if (result.valid) {
- console.log('✅ All required files present');
- process.exit(0);
- } else {
- console.error('❌ Missing required files:', result.missing.join(', '));
- process.exit(1);
- }
- break;
- }
-
- case 'check': {
- // Usage: node install-helpers.js check
- const status = checkMcpConfiguration();
- if (status.configured) {
- console.log('✅ MeMesh is configured in MCP settings');
- console.log(` Server path: ${status.serverPath || 'unknown'}`);
- process.exit(0);
- } else {
- console.log('❌ MeMesh is NOT configured in MCP settings');
- console.log(` Expected config: ${MCP_SETTINGS_PATH}`);
- process.exit(1);
- }
- break;
- }
-
- case 'help':
- default:
- console.log('MeMesh Installation Helpers');
- console.log('');
- console.log('Usage: node install-helpers.js [args]');
- console.log('');
- console.log('Commands:');
- console.log(' configure - Configure MeMesh in ~/.claude/mcp_settings.json');
- console.log(' verify [base-path] - Verify installation files exist');
- console.log(' check - Check if MeMesh is configured');
- console.log(' help - Show this help');
- console.log('');
- console.log('Examples:');
- console.log(' node install-helpers.js configure /path/to/server-bootstrap.js');
- console.log(' node install-helpers.js check');
- break;
-}
diff --git a/scripts/install.sh b/scripts/install.sh
deleted file mode 100755
index 0627066e..00000000
--- a/scripts/install.sh
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/bin/bash
-
-# MeMesh - Interactive Installation Script
-# This script guides you through MeMesh setup step-by-step
-
-set -e # Exit on error
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-CYAN='\033[0;36m'
-NC='\033[0m' # No Color
-
-# Helper functions
-print_step() {
- echo -e "${BLUE}▶ $1${NC}"
-}
-
-print_success() {
- echo -e "${GREEN}✓ $1${NC}"
-}
-
-print_error() {
- echo -e "${RED}✗ $1${NC}"
-}
-
-print_warning() {
- echo -e "${YELLOW}⚠ $1${NC}"
-}
-
-print_info() {
- echo -e "${CYAN}ℹ $1${NC}"
-}
-
-# ASCII Art Banner
-cat << "EOF"
- _____ _ _ ___ _ ____ _ _
- / ____| | | | / __| | | | _ \ | | | |
-| | | | __ _ _ _ __| | ___ | | ___ __| | ___ | |_) |_ _ __| | __| |_ _
-| | | |/ _` | | | |/ _` |/ _ \| | / _ \ / _` |/ _ \ | _ <| | | |/ _` |/ _` | | | |
-| |____| | (_| | |_| | (_| | __/| |_| (_) | (_| | __/ | |_) | |_| | (_| | (_| | |_| |
- \_____|_|\__,_|\__,_|\__,_|\___| \___\___/ \__,_|\___| |____/ \__,_|\__,_|\__,_|\__, |
- __/ |
- |___/
-EOF
-
-echo ""
-echo "Welcome to MeMesh installation!"
-echo "This interactive guide will set up MeMesh and show you how to use it."
-echo ""
-
-# Step 1: Check prerequisites
-print_step "Step 1/9: Checking prerequisites..."
-
-# Check Node.js
-if ! command -v node &> /dev/null; then
- print_error "Node.js is not installed"
- echo "Please install Node.js 20+ from https://nodejs.org"
- exit 1
-fi
-
-NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
-if [ "$NODE_VERSION" -lt 20 ]; then
- print_error "Node.js version 20+ is required (found: $(node -v))"
- exit 1
-fi
-print_success "Node.js $(node -v) found"
-
-# Check npm
-if ! command -v npm &> /dev/null; then
- print_error "npm is not installed"
- exit 1
-fi
-print_success "npm $(npm -v) found"
-
-# Check git
-if ! command -v git &> /dev/null; then
- print_warning "git not found (optional, but recommended)"
-else
- print_success "git $(git --version | cut -d' ' -f3) found"
-fi
-
-echo ""
-print_info "💡 Tip: MeMesh uses your Claude Code subscription - no extra API keys needed!"
-echo ""
-
-# Step 2: Install dependencies
-print_step "Step 2/9: Installing dependencies..."
-npm install
-print_success "Dependencies installed"
-
-echo ""
-print_info "💡 What MeMesh does: Provides workflow guidance, smart planning, and project memory"
-echo ""
-
-# Step 3: Build project
-print_step "Step 3/9: Building MeMesh..."
-npm run build
-print_success "Build completed"
-
-echo ""
-print_info "💡 MeMesh focuses on high-signal guidance without extra overhead"
-echo ""
-
-# Step 4: Check system resources
-print_step "Step 4/9: Checking system resources..."
-echo ""
-if [ -f "scripts/check-system-resources.js" ]; then
- node scripts/check-system-resources.js || true # Don't fail on error
-else
- print_warning "Resource check skipped (scripts/check-system-resources.js not found)"
-fi
-echo ""
-
-# Step 5: Configure environment (optional)
-print_step "Step 5/9: Configuring environment..."
-
-# Check if .env exists
-if [ -f .env ]; then
- print_success ".env file already exists"
-else
- cp .env.example .env
- print_success ".env file created from template"
- echo ""
- echo "Note: MeMesh uses your existing Claude Code subscription."
- echo "No API keys are needed - it works through Claude Code's MCP integration."
- echo ""
-fi
-
-# Step 6: Configure MCP
-print_step "Step 6/9: Configuring MCP integration..."
-
-MeMesh_PATH="$(pwd)/dist/mcp/server-bootstrap.js"
-
-# Add MeMesh to MCP config using Node.js helper
-if node scripts/install-helpers.js add-to-mcp "$MeMesh_PATH"; then
- print_success "MeMesh added to Claude Code MCP configuration"
-else
- print_warning "Could not update Claude Code config automatically."
- echo ""
- echo "Manual setup:"
- echo "1. Open ~/.claude.json"
- echo "2. Add this MCP server entry:"
- echo " {"
- echo " \"mcpServers\": {"
- echo " \"memesh\": {"
- echo " \"type\": \"stdio\","
- echo " \"command\": \"node\","
- echo " \"args\": [\"$MeMesh_PATH\"],"
- echo " \"env\": {"
- echo " \"NODE_ENV\": \"production\""
- echo " }"
- echo " }"
- echo " }"
- echo " }"
-fi
-
-echo ""
-print_info "💡 MeMesh is now registered with Claude Code - it will activate when you start Claude Code"
-echo ""
-
-# Step 7: Test installation
-print_step "Step 7/9: Testing installation..."
-echo ""
-echo "Running validation tests (this may take 30-60 seconds)..."
-echo ""
-
-# Progress bar function
-show_progress() {
- local duration=$1
- local width=40
- local elapsed=0
- local chars="█▓▒░"
-
- while [ $elapsed -lt $duration ]; do
- local progress=$((elapsed * width / duration))
- local remaining=$((width - progress))
-
- # Build progress bar
- printf "\r ["
- for ((i=0; i "$TEST_OUTPUT_FILE" 2>&1 &
-TEST_PID=$!
-
-# Animated spinner while tests run
-spin_chars='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
-test_start=$(date +%s)
-echo -e " ${CYAN}Running tests...${NC}"
-
-while kill -0 $TEST_PID 2>/dev/null; do
- for ((i=0; i<${#spin_chars}; i++)); do
- if ! kill -0 $TEST_PID 2>/dev/null; then
- break 2
- fi
- elapsed=$(($(date +%s) - test_start))
- printf "\r ${spin_chars:$i:1} Testing... (%ds)" $elapsed
- sleep 0.1
- done
-done
-
-# Wait for test to complete and check result
-wait $TEST_PID
-TEST_EXIT_CODE=$?
-
-# Clear spinner line
-printf "\r \r"
-
-# Show final result
-if [ $TEST_EXIT_CODE -eq 0 ]; then
- elapsed=$(($(date +%s) - test_start))
- print_success "All tests passed (${elapsed}s)"
-else
- if grep -q "passed" "$TEST_OUTPUT_FILE"; then
- PASS_COUNT=$(grep -oE "[0-9]+ passed" "$TEST_OUTPUT_FILE" | tail -1 | grep -oE "[0-9]+")
- FAIL_COUNT=$(grep -oE "[0-9]+ failed" "$TEST_OUTPUT_FILE" | tail -1 | grep -oE "[0-9]+" || echo "0")
- print_warning "Tests: ${PASS_COUNT:-?} passed, ${FAIL_COUNT:-some} failed (installation still successful)"
- else
- print_warning "Some tests failed (installation still successful)"
- fi
-fi
-rm -f "$TEST_OUTPUT_FILE"
-
-# Step 8: Usage Demonstration
-print_step "Step 8/9: Basic Usage Demo"
-echo ""
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-echo "What can MeMesh do for you?"
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-echo ""
-echo "${CYAN}🤖 Smart Task Routing${NC}"
-echo " MeMesh routes requests through a focused workflow engine"
-echo " to provide actionable guidance and higher-quality outputs."
-echo ""
-echo "${CYAN}💡 Example Prompts to Try in Claude Code:${NC}"
-echo " \"Analyze my codebase architecture\""
-echo " \"Generate tests for auth.ts\""
-echo " \"Review this code for security issues\""
-echo " \"Optimize this database query\""
-echo " \"Help me debug this async bug\""
-echo ""
-echo "${CYAN}📊 Project Memory:${NC}"
-echo " MeMesh records decisions, changes, and test outcomes"
-echo " into a local knowledge graph for future recall."
-echo ""
-echo "${CYAN}🎯 Capability Routing:${NC}"
-echo " MeMesh tailors prompts based on task type and project context"
-echo " to keep responses focused and actionable."
-echo ""
-
-# Step 9: Verify MCP server
-print_step "Step 9/9: Verifying MCP server..."
-
-# Try to start MCP server (timeout after 3 seconds)
-if command -v timeout &> /dev/null; then
- timeout 3 node dist/mcp/server-bootstrap.js &> /dev/null \
- && print_success "MCP server starts successfully" \
- || print_success "MCP server configured (will start when Claude Code connects)"
-else
- node dist/mcp/server-bootstrap.js &> /dev/null &
- MCP_PID=$!
- sleep 3
- if kill -0 $MCP_PID 2>/dev/null; then
- kill $MCP_PID &> /dev/null || true
- print_success "MCP server starts successfully"
- else
- print_success "MCP server configured (will start when Claude Code connects)"
- fi
-fi
-
-# Installation complete
-echo ""
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-print_success "Installation complete! 🎉"
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-echo ""
-echo "Next steps:"
-echo " 1. ${CYAN}Restart Claude Code${NC} (if running)"
-echo " 2. ${CYAN}Try the example prompts${NC} shown above"
-echo ""
-echo "Documentation:"
-echo " • Quick Start: README.md"
-echo " • Full Guide: docs/README.md"
-echo " • Commands: docs/COMMANDS.md"
-print_success "Happy coding with your new buddy! 🤖"
-echo ""
diff --git a/scripts/manage-mcp-processes.sh b/scripts/manage-mcp-processes.sh
deleted file mode 100755
index 6e42073f..00000000
--- a/scripts/manage-mcp-processes.sh
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/bin/bash
-
-# MeMesh MCP Process Management Script
-# Helps users manage MeMesh MCP server processes
-
-set -e
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Function to print colored output
-print_info() {
- echo -e "${BLUE}ℹ${NC} $1"
-}
-
-print_success() {
- echo -e "${GREEN}✓${NC} $1"
-}
-
-print_warning() {
- echo -e "${YELLOW}⚠${NC} $1"
-}
-
-print_error() {
- echo -e "${RED}✗${NC} $1"
-}
-
-# Function to list all MeMesh MCP processes
-list_processes() {
- print_info "Listing all MeMesh MCP server processes..."
- echo ""
-
- # Find all MeMesh processes
- PROCESSES=$(ps aux | grep -E "memesh|claude-code-buddy|server-bootstrap" | grep -v grep | grep -v "manage-mcp-processes")
-
- if [ -z "$PROCESSES" ]; then
- print_success "No MeMesh MCP server processes found"
- return 0
- fi
-
- echo "$PROCESSES" | while read -r line; do
- PID=$(echo "$line" | awk '{print $2}')
- PARENT_PID=$(ps -p "$PID" -o ppid= 2>/dev/null | tr -d ' ')
- ELAPSED=$(ps -p "$PID" -o etime= 2>/dev/null | tr -d ' ')
- COMMAND=$(echo "$line" | awk '{for(i=11;i<=NF;i++) printf $i" "; print ""}')
-
- echo -e "${BLUE}PID${NC}: $PID | ${BLUE}PPID${NC}: $PARENT_PID | ${BLUE}Uptime${NC}: $ELAPSED"
- echo -e " ${BLUE}Command${NC}: $COMMAND"
-
- # Check if parent process exists or is init (orphaned)
- if ! ps -p "$PARENT_PID" > /dev/null 2>&1 || [ "$PARENT_PID" -eq 1 ]; then
- print_warning " ⚠ Orphaned process (parent process no longer exists)"
- fi
-
- echo ""
- done
-
- # Count processes
- COUNT=$(echo "$PROCESSES" | wc -l | tr -d ' ')
- print_info "Found $COUNT MeMesh MCP server process(es)"
-}
-
-# Function to kill all MeMesh MCP processes
-kill_all_processes() {
- print_warning "Preparing to terminate all MeMesh MCP server processes..."
-
- # Find all MeMesh processes
- PIDS=$(ps aux | grep -E "memesh|claude-code-buddy|server-bootstrap" | grep -v grep | grep -v "manage-mcp-processes" | awk '{print $2}')
-
- if [ -z "$PIDS" ]; then
- print_success "No MeMesh MCP server processes found"
- return 0
- fi
-
- # Ask for confirmation
- echo -e "${YELLOW}The following processes will be terminated:${NC}"
- echo "$PIDS"
- echo ""
- read -p "Are you sure you want to terminate these processes? (y/N): " -n 1 -r
- echo ""
-
- if [[ ! $REPLY =~ ^[Yy]$ ]]; then
- print_info "Operation cancelled"
- return 0
- fi
-
- # Kill processes
- for PID in $PIDS; do
- if kill -15 "$PID" 2>/dev/null; then
- print_success "Terminated process $PID"
- else
- print_error "Failed to terminate process $PID (may require sudo)"
- fi
- done
-
- # Wait a bit and check if any processes are still running
- sleep 1
-
- REMAINING=$(ps aux | grep -E "memesh|claude-code-buddy|server-bootstrap" | grep -v grep | grep -v "manage-mcp-processes" | wc -l | tr -d ' ')
-
- if [ "$REMAINING" -eq 0 ]; then
- print_success "All MeMesh MCP server processes terminated"
- else
- print_warning "Still $REMAINING process(es) running, attempting force termination..."
- PIDS=$(ps aux | grep -E "memesh|claude-code-buddy|server-bootstrap" | grep -v grep | grep -v "manage-mcp-processes" | awk '{print $2}')
- for PID in $PIDS; do
- if kill -9 "$PID" 2>/dev/null; then
- print_success "Force-terminated process $PID"
- fi
- done
- fi
-}
-
-# Function to check MeMesh MCP configuration
-check_config() {
- print_info "Checking MeMesh MCP configuration..."
- echo ""
-
- CONFIG_PATH="$HOME/.claude/config.json"
-
- if [ ! -f "$CONFIG_PATH" ]; then
- print_error "Configuration file not found: $CONFIG_PATH"
- return 1
- fi
-
- print_success "Configuration file exists: $CONFIG_PATH"
-
- # Check if MeMesh is configured
- if grep -q "memesh|claude-code-buddy" "$CONFIG_PATH"; then
- print_success "MeMesh MCP server is configured"
-
- # Extract MeMesh config
- echo ""
- print_info "MeMesh Configuration:"
- # Use jq if available, otherwise use grep
- if command -v jq > /dev/null 2>&1; then
- jq '.mcpServers["memesh|claude-code-buddy"]' "$CONFIG_PATH"
- else
- grep -A 10 "memesh|claude-code-buddy" "$CONFIG_PATH"
- fi
- else
- print_warning "MeMesh MCP server not configured in $CONFIG_PATH"
- print_info "Please run: npm run setup"
- fi
-}
-
-# Function to restart MeMesh MCP server
-restart_server() {
- print_info "Restarting MeMesh MCP server..."
- echo ""
-
- # Kill existing processes
- kill_all_processes
-
- echo ""
- print_info "MeMesh MCP server stopped"
- print_info "MCP server will automatically restart when you launch Claude Code CLI"
-}
-
-# Function to show orphaned processes
-show_orphaned() {
- print_info "Listing orphaned processes (parent process no longer exists)..."
- echo ""
-
- PROCESSES=$(ps aux | grep -E "memesh|claude-code-buddy|server-bootstrap" | grep -v grep | grep -v "manage-mcp-processes")
-
- if [ -z "$PROCESSES" ]; then
- print_success "No MeMesh MCP server processes found"
- return 0
- fi
-
- ORPHAN_COUNT=0
-
- while read -r line; do
- PID=$(echo "$line" | awk '{print $2}')
- PARENT_PID=$(ps -p "$PID" -o ppid= 2>/dev/null | tr -d ' ')
-
- # Check if parent process exists
- if ! ps -p "$PARENT_PID" > /dev/null 2>&1 || [ "$PARENT_PID" -eq 1 ]; then
- ORPHAN_COUNT=$((ORPHAN_COUNT + 1))
- ELAPSED=$(ps -p "$PID" -o etime= 2>/dev/null | tr -d ' ')
- print_warning "Orphaned process found - PID: $PID, Uptime: $ELAPSED"
- fi
- done <<< "$PROCESSES"
-
- if [ $ORPHAN_COUNT -eq 0 ]; then
- print_success "No orphaned processes found"
- else
- echo ""
- print_info "Found $ORPHAN_COUNT orphaned process(es)"
- print_info "Use 'npm run processes:kill' to clean up these processes"
- fi
-}
-
-# Function to show help
-show_help() {
- cat << EOF
-${BLUE}MeMesh MCP Process Management Script${NC}
-
-Usage: $0 [COMMAND]
-
-${YELLOW}Available commands:${NC}
- ${GREEN}list${NC} List all MeMesh MCP server processes
- ${GREEN}kill${NC} Terminate all MeMesh MCP server processes
- ${GREEN}restart${NC} Restart MeMesh MCP server (terminate all processes)
- ${GREEN}config${NC} Check MeMesh MCP configuration
- ${GREEN}orphaned${NC} List orphaned processes (parent process no longer exists)
- ${GREEN}help${NC} Show this help message
-
-${YELLOW}Examples:${NC}
- $0 list # List all processes
- $0 kill # Terminate all processes
- $0 restart # Restart MCP server
- $0 config # Check configuration
- $0 orphaned # List orphaned processes
-
-${YELLOW}Notes:${NC}
-- Claude Code CLI automatically starts an MCP server when a session begins
-- Normally, each Claude Code CLI session has 1 MCP server process
-- If you find multiple orphaned processes, use the 'kill' command to clean up
-- After cleanup, the MCP server will automatically restart when you launch Claude Code CLI
-EOF
-}
-
-# Main script
-case "${1:-help}" in
- list)
- list_processes
- ;;
- kill)
- kill_all_processes
- ;;
- restart)
- restart_server
- ;;
- config)
- check_config
- ;;
- orphaned)
- show_orphaned
- ;;
- help|--help|-h)
- show_help
- ;;
- *)
- print_error "Unknown command: $1"
- echo ""
- show_help
- exit 1
- ;;
-esac
diff --git a/scripts/migrate-from-ccb.sh b/scripts/migrate-from-ccb.sh
deleted file mode 100755
index c6125944..00000000
--- a/scripts/migrate-from-ccb.sh
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/bin/bash
-
-# ==============================================================================
-# MeMesh Data Migration Script
-# Migrates data from Claude Code Buddy (~/.claude-code-buddy) to MeMesh (~/.memesh)
-# ==============================================================================
-
-set -e
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-# Directories
-OLD_DIR="$HOME/.claude-code-buddy"
-NEW_DIR="$HOME/.memesh"
-BACKUP_DIR="$HOME/.memesh-migration-backup-$(date +%Y%m%d-%H%M%S)"
-
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo -e "${BLUE} MeMesh Data Migration Tool${NC}"
-echo -e "${BLUE} From: Claude Code Buddy → MeMesh${NC}"
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo ""
-
-# ==============================================================================
-# Safety Guarantees
-# ==============================================================================
-echo -e "${GREEN}🛡️ Safety Guarantees:${NC}"
-echo -e "${GREEN} ✓ Original data preserved - never modified or deleted${NC}"
-echo -e "${GREEN} ✓ Full backup created before any changes${NC}"
-echo -e "${GREEN} ✓ Rollback possible - restore from backup anytime${NC}"
-echo -e "${GREEN} ✓ Idempotent - safe to run multiple times${NC}"
-echo -e "${GREEN} ✓ Atomic operations - all or nothing migration${NC}"
-echo ""
-
-# ==============================================================================
-# Step 1: Pre-flight checks
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 1: Pre-flight checks${NC}"
-
-# Check if old directory exists
-if [ ! -d "$OLD_DIR" ]; then
- echo -e "${GREEN} ✓ No legacy data found at $OLD_DIR${NC}"
- echo -e "${GREEN} Nothing to migrate - you're good to go!${NC}"
- exit 0
-fi
-
-echo -e "${GREEN} ✓ Found legacy data at: $OLD_DIR${NC}"
-
-# Check if new directory already exists
-if [ -d "$NEW_DIR" ]; then
- echo -e "${YELLOW} ⚠ Target directory already exists: $NEW_DIR${NC}"
- echo ""
- echo " Options:"
- echo " 1. Merge data (append to existing)"
- echo " 2. Cancel migration"
- echo ""
- read -p " Enter your choice (1/2): " CHOICE
-
- case $CHOICE in
- 1)
- echo -e "${YELLOW} → Proceeding with merge...${NC}"
- ;;
- 2)
- echo -e "${BLUE} Migration cancelled.${NC}"
- exit 0
- ;;
- *)
- echo -e "${RED} Invalid choice. Exiting.${NC}"
- exit 1
- ;;
- esac
-else
- echo -e "${GREEN} ✓ Target directory does not exist${NC}"
-fi
-
-# Check disk space
-OLD_SIZE=$(du -sh "$OLD_DIR" 2>/dev/null | awk '{print $1}')
-echo -e "${GREEN} ✓ Data size: $OLD_SIZE${NC}"
-
-# Check for running MCP servers
-if pgrep -f "claude-code-buddy|memesh|server-bootstrap" > /dev/null; then
- echo -e "${YELLOW} ⚠ MCP server processes detected${NC}"
- echo ""
- read -p " Stop all MCP servers before proceeding? (y/n): " STOP_SERVERS
-
- if [[ "$STOP_SERVERS" =~ ^[Yy]$ ]]; then
- pkill -f "claude-code-buddy|memesh|server-bootstrap" 2>/dev/null || true
- sleep 2
- echo -e "${GREEN} ✓ Stopped MCP servers${NC}"
- else
- echo -e "${YELLOW} ⚠ Proceeding with servers running (not recommended)${NC}"
- fi
-else
- echo -e "${GREEN} ✓ No running MCP servers detected${NC}"
-fi
-
-echo ""
-
-# ==============================================================================
-# Step 2: Create backup
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 2: Creating backup${NC}"
-
-mkdir -p "$BACKUP_DIR"
-echo -e "${GREEN} ✓ Created backup directory: $BACKUP_DIR${NC}"
-
-# Copy old directory to backup
-if cp -r "$OLD_DIR" "$BACKUP_DIR/claude-code-buddy-backup"; then
- echo -e "${GREEN} ✓ Backup created successfully${NC}"
-else
- echo -e "${RED} ✗ Backup failed${NC}"
- exit 1
-fi
-
-echo ""
-
-# ==============================================================================
-# Step 3: Checkpoint SQLite databases
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 3: Preparing databases${NC}"
-
-# SQLite databases to checkpoint
-SQLITE_DBS=(
- "database.db"
- "knowledge-graph.db"
- "evolution-store.db"
-)
-
-# Check if sqlite3 is available
-if command -v sqlite3 &> /dev/null; then
- for DB in "${SQLITE_DBS[@]}"; do
- if [ -f "$OLD_DIR/$DB" ]; then
- echo -e "${BLUE} → Checkpointing: $DB${NC}"
- if sqlite3 "$OLD_DIR/$DB" "PRAGMA wal_checkpoint(TRUNCATE);" 2>/dev/null; then
- echo -e "${GREEN} ✓ Checkpointed: $DB${NC}"
- else
- echo -e "${YELLOW} ⚠ Could not checkpoint: $DB (continuing...)${NC}"
- fi
- fi
- done
-else
- echo -e "${YELLOW} ⚠ sqlite3 not found - skipping WAL checkpoint${NC}"
-fi
-
-echo ""
-
-# ==============================================================================
-# Step 4: Migrate data (Atomic)
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 4: Migrating data (atomic operation)${NC}"
-
-# Create temporary directory for atomic migration
-TEMP_DIR=$(mktemp -d)
-echo -e "${BLUE} → Using temporary directory: $TEMP_DIR${NC}"
-
-# Setup cleanup handler to remove temp directory on error/exit
-cleanup_migration() {
- if [ -d "$TEMP_DIR" ]; then
- echo -e "${YELLOW} → Cleaning up temporary directory${NC}"
- rm -rf "$TEMP_DIR"
- fi
-}
-trap cleanup_migration EXIT ERR
-
-# List of files/directories to migrate
-ITEMS_TO_MIGRATE=(
- "database.db"
- "database.db-shm"
- "database.db-wal"
- "knowledge-graph.db"
- "knowledge-graph.db-shm"
- "knowledge-graph.db-wal"
- "evolution-store.db"
- "evolution-store.db-shm"
- "evolution-store.db-wal"
- "logs/"
- "cache/"
-)
-
-MIGRATED_COUNT=0
-FAILED_COUNT=0
-TOTAL_ITEMS=0
-
-# Count total items to migrate
-for ITEM in "${ITEMS_TO_MIGRATE[@]}"; do
- if [ -e "$OLD_DIR/$ITEM" ]; then
- TOTAL_ITEMS=$((TOTAL_ITEMS + 1))
- fi
-done
-
-echo -e "${BLUE} → Found $TOTAL_ITEMS items to migrate${NC}"
-echo ""
-
-# Copy items to temporary directory with progress
-CURRENT_ITEM=0
-for ITEM in "${ITEMS_TO_MIGRATE[@]}"; do
- if [ -e "$OLD_DIR/$ITEM" ]; then
- CURRENT_ITEM=$((CURRENT_ITEM + 1))
- echo -e "${BLUE} [$CURRENT_ITEM/$TOTAL_ITEMS] Copying: $ITEM${NC}"
-
- if cp -r "$OLD_DIR/$ITEM" "$TEMP_DIR/$ITEM" 2>/dev/null; then
- echo -e "${GREEN} ✓ Success: $ITEM${NC}"
- MIGRATED_COUNT=$((MIGRATED_COUNT + 1))
- else
- echo -e "${RED} ✗ Failed: $ITEM${NC}"
- FAILED_COUNT=$((FAILED_COUNT + 1))
- fi
- fi
-done
-
-echo ""
-echo -e "${BLUE} Migration Summary:${NC}"
-echo -e "${GREEN} Migrated: $MIGRATED_COUNT items${NC}"
-if [ $FAILED_COUNT -gt 0 ]; then
- echo -e "${RED} Failed: $FAILED_COUNT items${NC}"
-fi
-
-# Verify integrity before atomic commit
-echo ""
-echo -e "${YELLOW}▶ Step 4.5: Verifying integrity${NC}"
-
-INTEGRITY_PASSED=true
-
-# Verify file counts
-TEMP_FILE_COUNT=$(find "$TEMP_DIR" -type f | wc -l | tr -d ' ')
-echo -e "${BLUE} → Files copied: $TEMP_FILE_COUNT${NC}"
-
-# Verify key database files
-for DB in "${SQLITE_DBS[@]}"; do
- if [ -f "$OLD_DIR/$DB" ] && [ -f "$TEMP_DIR/$DB" ]; then
- OLD_SIZE=$(wc -c < "$OLD_DIR/$DB" 2>/dev/null || echo "0")
- TEMP_SIZE=$(wc -c < "$TEMP_DIR/$DB" 2>/dev/null || echo "0")
-
- if [ "$OLD_SIZE" -eq "$TEMP_SIZE" ]; then
- echo -e "${GREEN} ✓ Verified: $DB ($TEMP_SIZE bytes)${NC}"
- else
- echo -e "${RED} ✗ Size mismatch: $DB (expected: $OLD_SIZE, got: $TEMP_SIZE)${NC}"
- INTEGRITY_PASSED=false
- fi
- fi
-done
-
-# Atomic commit: move temp directory to final location
-if [ "$INTEGRITY_PASSED" = true ] && [ $FAILED_COUNT -eq 0 ]; then
- echo ""
- echo -e "${YELLOW}▶ Step 4.6: Atomic commit${NC}"
-
- if [ -d "$NEW_DIR" ]; then
- # Merge mode: copy from temp to new dir
- echo -e "${BLUE} → Merging with existing directory${NC}"
- if cp -rn "$TEMP_DIR/"* "$NEW_DIR/" 2>/dev/null; then
- echo -e "${GREEN} ✓ Atomic commit successful${NC}"
- else
- echo -e "${RED} ✗ Atomic commit failed${NC}"
- rm -rf "$TEMP_DIR"
- exit 1
- fi
- else
- # Clean migration: atomic rename
- echo -e "${BLUE} → Creating new directory${NC}"
- if mv "$TEMP_DIR" "$NEW_DIR" 2>/dev/null; then
- echo -e "${GREEN} ✓ Atomic commit successful${NC}"
- else
- echo -e "${RED} ✗ Atomic commit failed${NC}"
- rm -rf "$TEMP_DIR"
- exit 1
- fi
- fi
-
- # Clean up temp directory if not already moved
- [ -d "$TEMP_DIR" ] && rm -rf "$TEMP_DIR"
-else
- echo -e "${RED} ✗ Integrity check failed - rolling back${NC}"
- rm -rf "$TEMP_DIR"
- exit 1
-fi
-
-echo ""
-
-# ==============================================================================
-# Step 5: Verify migration
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 5: Final verification${NC}"
-
-VERIFICATION_PASSED=true
-
-# Check if key files exist
-KEY_FILES=(
- "database.db"
- "knowledge-graph.db"
-)
-
-for FILE in "${KEY_FILES[@]}"; do
- if [ -f "$NEW_DIR/$FILE" ]; then
- NEW_SIZE=$(wc -c < "$NEW_DIR/$FILE" 2>/dev/null || echo "0")
- OLD_SIZE=$(wc -c < "$OLD_DIR/$FILE" 2>/dev/null || echo "0")
-
- if [ "$NEW_SIZE" -eq "$OLD_SIZE" ]; then
- echo -e "${GREEN} ✓ Verified: $FILE ($NEW_SIZE bytes)${NC}"
- else
- echo -e "${YELLOW} ⚠ Size mismatch: $FILE (old: $OLD_SIZE, new: $NEW_SIZE)${NC}"
- VERIFICATION_PASSED=false
- fi
- elif [ -f "$OLD_DIR/$FILE" ]; then
- echo -e "${RED} ✗ Missing: $FILE${NC}"
- VERIFICATION_PASSED=false
- fi
-done
-
-echo ""
-
-# ==============================================================================
-# Step 6: Update MCP configuration (optional)
-# ==============================================================================
-echo -e "${YELLOW}▶ Step 6: MCP configuration check${NC}"
-
-MCP_CONFIG_PATHS=(
- "$HOME/.claude/config.json"
- "$HOME/.config/claude/claude_desktop_config.json"
-)
-
-MCP_CONFIG_FOUND=false
-
-for CONFIG_PATH in "${MCP_CONFIG_PATHS[@]}"; do
- if [ -f "$CONFIG_PATH" ]; then
- MCP_CONFIG_FOUND=true
- echo -e "${GREEN} ✓ Found MCP config: $CONFIG_PATH${NC}"
-
- # Check if config contains old server name
- if grep -q "claude-code-buddy" "$CONFIG_PATH"; then
- echo -e "${YELLOW} ⚠ Config still references 'claude-code-buddy'${NC}"
- echo ""
- echo " Your MCP configuration needs to be updated."
- echo " The server name should be changed from 'claude-code-buddy' to 'memesh'"
- echo ""
- echo " Manual update required:"
- echo " 1. Open: $CONFIG_PATH"
- echo " 2. Find: \"claude-code-buddy\""
- echo " 3. Replace with: \"memesh\""
- echo ""
- else
- echo -e "${GREEN} ✓ Config looks up to date${NC}"
- fi
- fi
-done
-
-if [ "$MCP_CONFIG_FOUND" = false ]; then
- echo -e "${YELLOW} ⚠ No MCP config found${NC}"
- echo -e "${BLUE} You may need to configure MCP manually${NC}"
-fi
-
-echo ""
-
-# ==============================================================================
-# Final summary
-# ==============================================================================
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo -e "${BLUE} Migration Summary${NC}"
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo ""
-
-if [ "$VERIFICATION_PASSED" = true ] && [ $FAILED_COUNT -eq 0 ]; then
- echo -e "${GREEN}✅ Migration completed successfully!${NC}"
- echo ""
- echo -e "${GREEN}📊 Migration Summary:${NC}"
- echo " From: $OLD_DIR"
- echo " To: $NEW_DIR"
- echo " Items migrated: $MIGRATED_COUNT"
- echo ""
- echo -e "${GREEN}💾 Backup Location:${NC}"
- echo " $BACKUP_DIR"
- echo ""
- echo -e "${YELLOW}📋 Next Steps (Complete in Order):${NC}"
- echo ""
- echo -e "${BLUE}1. Update MCP Configuration${NC}"
- echo " Edit your MCP config file and change server name:"
- echo " • macOS: ~/.claude/config.json"
- echo " • Linux: ~/.config/claude/claude_desktop_config.json"
- echo ""
- echo " Find and replace:"
- echo " \"claude-code-buddy\" → \"memesh\""
- echo ""
- echo " Or use this command:"
- echo -e " ${GREEN}sed -i.bak 's/claude-code-buddy/memesh/g' ~/.claude/config.json${NC}"
- echo ""
- echo -e "${BLUE}2. Restart Claude Code${NC}"
- echo " Quit and restart Claude Code application to load new configuration"
- echo ""
- echo -e "${BLUE}3. Verify Migration${NC}"
- echo " Test MeMesh tools are working:"
- echo -e " ${GREEN}memesh-entities list${NC}"
- echo -e " ${GREEN}memesh-relations list${NC}"
- echo ""
- echo -e "${BLUE}4. Cleanup (After Verification)${NC}"
- echo " Once you've verified everything works, you can clean up:"
- echo ""
- echo " Remove old data:"
- echo -e " ${GREEN}rm -rf $OLD_DIR${NC}"
- echo ""
- echo " Remove backup (keep until fully verified!):"
- echo -e " ${GREEN}rm -rf $BACKUP_DIR${NC}"
- echo ""
- echo -e "${YELLOW}⚠️ Important: Keep backup until you've verified all tools work!${NC}"
- echo ""
- echo -e "${GREEN}Need help? https://github.com/PCIRCLE-AI/claude-code-buddy/issues${NC}"
- echo ""
- exit 0
-else
- echo -e "${RED}⚠️ Migration completed with warnings${NC}"
- echo ""
- echo -e "${YELLOW}Issues detected:${NC}"
- if [ $FAILED_COUNT -gt 0 ]; then
- echo " - $FAILED_COUNT items failed to migrate"
- fi
- if [ "$VERIFICATION_PASSED" = false ]; then
- echo " - Verification checks failed"
- fi
- echo ""
- echo -e "${YELLOW}Recommendations:${NC}"
- echo " 1. Check the error messages above"
- echo " 2. Your backup is safe at: $BACKUP_DIR"
- echo " 3. Your old data is still at: $OLD_DIR"
- echo " 4. Contact support if needed: https://github.com/PCIRCLE-AI/claude-code-buddy/issues"
- echo ""
- exit 1
-fi
diff --git a/scripts/migrate-memory-system.ts b/scripts/migrate-memory-system.ts
deleted file mode 100644
index 8e659c21..00000000
--- a/scripts/migrate-memory-system.ts
+++ /dev/null
@@ -1,342 +0,0 @@
-/**
- * Memory System Migration Script
- *
- * Migrates existing memories to the new unified memory system with:
- * - UUID v4 validation and regeneration
- * - Tag normalization
- * - Metadata size validation
- * - Auto-tagging enhancement
- * - ESCAPE clause updates
- */
-
-import { KnowledgeGraph } from '../src/knowledge-graph/index.js';
-import { UnifiedMemoryStore } from '../src/memory/UnifiedMemoryStore.js';
-import { AutoTagger } from '../src/memory/AutoTagger.js';
-import type { UnifiedMemory } from '../src/memory/types/unified-memory.js';
-import { validate as validateUUID } from 'uuid';
-import { v4 as uuidv4 } from 'uuid';
-import fs from 'fs/promises';
-
-interface MigrationOptions {
- backupFile: string;
- dryRun: boolean;
- skipValidation: boolean;
- dbPath: string;
-}
-
-interface MigrationStats {
- totalMemories: number;
- idsRegenerated: number;
- tagsNormalized: number;
- metadataValidated: number;
- imported: number;
- failed: number;
-}
-
-class MemoryMigrator {
- private kg!: KnowledgeGraph;
- private memoryStore!: UnifiedMemoryStore;
- private autoTagger: AutoTagger;
- private idMap: Map = new Map();
- private stats: MigrationStats = {
- totalMemories: 0,
- idsRegenerated: 0,
- tagsNormalized: 0,
- metadataValidated: 0,
- imported: 0,
- failed: 0,
- };
-
- constructor() {
- this.autoTagger = new AutoTagger();
- }
-
- async initialize(dbPath: string): Promise {
- this.kg = await KnowledgeGraph.create(dbPath);
- this.memoryStore = new UnifiedMemoryStore(this.kg);
- }
-
- async migrate(options: MigrationOptions): Promise {
- console.log('🚀 Starting memory system migration...\n');
-
- try {
- // Initialize
- await this.initialize(options.dbPath);
-
- // Step 1: Load backup
- const memories = await this.loadBackup(options.backupFile);
- this.stats.totalMemories = memories.length;
- console.log(`📦 Loaded ${memories.length} memories from backup\n`);
-
- // Step 2: Validate and transform
- const transformed = await this.transformMemories(memories);
- console.log(`✨ Transformed ${transformed.length} memories\n`);
-
- if (options.dryRun) {
- console.log('🔍 Dry run - no changes made\n');
- this.printSummary();
- return;
- }
-
- // Step 3: Import
- await this.importMemories(transformed);
- console.log(`✅ Imported ${this.stats.imported} memories successfully\n`);
-
- // Step 4: Validate (unless skipped)
- if (!options.skipValidation) {
- await this.validateMigration(memories, transformed);
- console.log('✅ Validation passed\n');
- }
-
- this.printSummary();
- console.log('\n🎉 Migration complete!');
- } finally {
- if (this.kg) {
- this.kg.close();
- }
- }
- }
-
- private async loadBackup(file: string): Promise {
- try {
- const content = await fs.readFile(file, 'utf-8');
- return JSON.parse(content);
- } catch (error) {
- throw new Error(`Failed to load backup file: ${error}`);
- }
- }
-
- private async transformMemories(
- memories: UnifiedMemory[]
- ): Promise {
- const transformed: UnifiedMemory[] = [];
-
- for (const memory of memories) {
- try {
- // Transform ID if invalid
- const newId = this.migrateId(memory.id);
-
- // Normalize tags
- const normalizedTags = this.normalizeTags(memory.tags);
- if (normalizedTags.length !== memory.tags.length) {
- this.stats.tagsNormalized++;
- }
-
- // Auto-generate additional tags
- const enhancedTags = this.autoTagger.generateTags(
- memory.content,
- normalizedTags
- );
-
- // Validate metadata size
- const validatedMetadata = this.validateMetadata(memory.metadata);
- if (validatedMetadata !== memory.metadata) {
- this.stats.metadataValidated++;
- }
-
- transformed.push({
- ...memory,
- id: newId,
- tags: enhancedTags,
- metadata: validatedMetadata,
- timestamp: new Date(memory.timestamp),
- });
- } catch (error) {
- console.error(` ❌ Failed to transform memory ${memory.id}:`, error);
- this.stats.failed++;
- }
- }
-
- return transformed;
- }
-
- private migrateId(oldId: string | undefined): string {
- if (!oldId || !validateUUID(oldId)) {
- const newId = uuidv4();
- if (oldId) {
- this.idMap.set(oldId, newId);
- this.stats.idsRegenerated++;
- const shortId = oldId.length > 20 ? oldId.substring(0, 20) + '...' : oldId;
- console.log(` 🔄 Invalid ID ${shortId} → ${newId}`);
- } else {
- console.log(` 🆕 Generated new ID: ${newId}`);
- }
- return newId;
- }
- return oldId;
- }
-
- private normalizeTags(tags: string[]): string[] {
- return tags
- .map((tag) => tag.toLowerCase().trim())
- .filter((tag, index, self) => self.indexOf(tag) === index)
- .map((tag) => {
- // Normalize common variants
- const normalizations: Record = {
- postgres: 'postgresql',
- pg: 'postgresql',
- js: 'javascript',
- ts: 'typescript',
- react: 'react',
- nextjs: 'next.js',
- vue: 'vue',
- };
- return normalizations[tag] || tag;
- });
- }
-
- private validateMetadata(metadata?: any): any {
- if (!metadata) return undefined;
-
- // Current system uses 1MB total limit (not per-field)
- const MAX_TOTAL_SIZE = 1024 * 1024; // 1MB
- const validated: any = {};
-
- // Check total size first
- const totalJson = JSON.stringify(metadata);
- const totalSize = Buffer.byteLength(totalJson, 'utf8');
-
- if (totalSize > MAX_TOTAL_SIZE) {
- console.warn(
- ` ⚠️ Total metadata size ${totalSize} bytes exceeds ${MAX_TOTAL_SIZE} bytes - dropping oversized fields`
- );
-
- // Drop fields one by one (largest first) until under limit
- const fields = Object.entries(metadata).map(([key, value]) => ({
- key,
- value,
- size: Buffer.byteLength(JSON.stringify(value), 'utf8'),
- }));
-
- // Sort by size (largest first)
- fields.sort((a, b) => b.size - a.size);
-
- let currentSize = 0;
- for (const field of fields) {
- const fieldJson = JSON.stringify(field.value);
- const fieldSize = Buffer.byteLength(fieldJson, 'utf8');
-
- if (currentSize + fieldSize <= MAX_TOTAL_SIZE) {
- validated[field.key] = field.value;
- currentSize += fieldSize;
- } else {
- console.warn(` → Dropped field '${field.key}' (${fieldSize} bytes)`);
- }
- }
-
- return validated;
- }
-
- // Total size OK - return as-is
- return metadata;
- }
-
- private async importMemories(memories: UnifiedMemory[]): Promise {
- for (const memory of memories) {
- try {
- await this.memoryStore.store(memory);
- this.stats.imported++;
-
- if (this.stats.imported % 100 === 0) {
- console.log(` 📥 Imported ${this.stats.imported}/${memories.length} memories...`);
- }
- } catch (error) {
- console.error(` ❌ Failed to import memory ${memory.id}:`, error);
- this.stats.failed++;
- }
- }
- }
-
- private async validateMigration(
- original: UnifiedMemory[],
- transformed: UnifiedMemory[]
- ): Promise {
- console.log('🔍 Validating migration...\n');
-
- // Check counts match (accounting for failed imports)
- const expectedCount = original.length - this.stats.failed;
- if (this.stats.imported !== expectedCount) {
- throw new Error(
- `Memory count mismatch: expected ${expectedCount}, imported ${this.stats.imported}`
- );
- }
-
- // Check all IDs are valid UUIDs
- let invalidIds = 0;
- for (const memory of transformed) {
- if (!validateUUID(memory.id!)) {
- console.error(` ❌ Invalid UUID after migration: ${memory.id}`);
- invalidIds++;
- }
- }
- if (invalidIds > 0) {
- throw new Error(`Found ${invalidIds} invalid UUIDs after migration`);
- }
-
- // Check tags are normalized
- let unnormalizedTags = 0;
- for (const memory of transformed) {
- for (const tag of memory.tags) {
- if (tag !== tag.toLowerCase()) {
- console.error(` ❌ Tag not normalized: ${tag}`);
- unnormalizedTags++;
- }
- }
- }
- if (unnormalizedTags > 0) {
- throw new Error(`Found ${unnormalizedTags} unnormalized tags`);
- }
-
- console.log(' ✅ All validations passed');
- }
-
- private printSummary(): void {
- console.log('📊 Migration Summary:');
- console.log(' ─────────────────────────────────');
- console.log(` Total memories: ${this.stats.totalMemories}`);
- console.log(` IDs regenerated: ${this.stats.idsRegenerated}`);
- console.log(` Tags normalized: ${this.stats.tagsNormalized}`);
- console.log(` Metadata validated: ${this.stats.metadataValidated}`);
- console.log(` Successfully imported: ${this.stats.imported}`);
- console.log(` Failed: ${this.stats.failed}`);
- console.log(' ─────────────────────────────────');
- }
-}
-
-// CLI execution
-async function main() {
- const args = process.argv.slice(2);
-
- const options: MigrationOptions = {
- backupFile:
- args.find((a) => a.startsWith('--backup='))?.split('=')[1] ||
- 'memory-backup.json',
- dryRun: args.includes('--dry-run'),
- skipValidation: args.includes('--skip-validation'),
- dbPath:
- args.find((a) => a.startsWith('--db='))?.split('=')[1] ||
- './memory.db',
- };
-
- // Validate backup file exists
- try {
- await fs.access(options.backupFile);
- } catch {
- console.error(`❌ Backup file not found: ${options.backupFile}`);
- console.error('\nPlease export memories first:');
- console.error(' npm run migrate:export');
- process.exit(1);
- }
-
- const migrator = new MemoryMigrator();
-
- try {
- await migrator.migrate(options);
- process.exit(0);
- } catch (error) {
- console.error('\n❌ Migration failed:', error);
- process.exit(1);
- }
-}
-
-main();
diff --git a/scripts/postinstall-lib.js b/scripts/postinstall-lib.js
deleted file mode 100644
index f6b2ab2b..00000000
--- a/scripts/postinstall-lib.js
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Postinstall Library - Core Functions
- *
- * Implements plugin installation logic with backward compatibility
- */
-import { existsSync, mkdirSync, readFileSync, writeFileSync, symlinkSync, unlinkSync, copyFileSync, lstatSync, realpathSync } from 'fs';
-import { join, dirname } from 'path';
-import { homedir } from 'os';
-// ============================================================================
-// Environment Detection
-// ============================================================================
-/**
- * Detect install mode (global vs local dev)
- */
-export function detectInstallMode(installPath) {
- // Check if path contains 'node_modules' - indicates global install
- if (installPath.includes('node_modules')) {
- return 'global';
- }
- // Check if in project root (has package.json, src/, etc.)
- const hasPackageJson = existsSync(join(installPath, 'package.json'));
- const hasSrcDir = existsSync(join(installPath, 'src'));
- if (hasPackageJson && hasSrcDir) {
- return 'local';
- }
- // Default to global
- return 'global';
-}
-/**
- * Get plugin install path based on mode
- * @param mode Install mode ('global' or 'local')
- * @param scriptDir Directory where this script is located (for global mode)
- */
-export function getPluginInstallPath(mode, scriptDir) {
- if (mode === 'local') {
- // Local dev: current working directory
- return process.cwd();
- }
- // Global: use provided scriptDir or fallback to cwd
- // For postinstall script, scriptDir will be the scripts/ directory
- if (scriptDir) {
- return dirname(scriptDir); // Parent of scripts/ directory
- }
- // Fallback: assume we're in the package root
- return process.cwd();
-}
-// ============================================================================
-// File Utilities
-// ============================================================================
-/**
- * Ensure directory exists
- */
-export function ensureDirectory(path) {
- if (!existsSync(path)) {
- mkdirSync(path, { recursive: true });
- }
-}
-/**
- * Read JSON file safely
- */
-export function readJSONFile(path) {
- try {
- if (!existsSync(path)) {
- return null;
- }
- const content = readFileSync(path, 'utf-8').trim();
- if (!content) {
- return null;
- }
- return JSON.parse(content);
- }
- catch (error) {
- const msg = error instanceof Error ? error.message : String(error);
- process.stderr.write(`[readJSONFile] Failed to parse ${path}: ${msg}\n`);
- return null;
- }
-}
-/**
- * Write JSON file
- */
-export function writeJSONFile(path, data) {
- ensureDirectory(dirname(path));
- writeFileSync(path, JSON.stringify(data, null, 2) + '\n', 'utf-8');
-}
-/**
- * Backup file with timestamp
- */
-export function backupFile(path) {
- if (!existsSync(path)) {
- return null;
- }
- const backupPath = `${path}.backup-${Date.now()}`;
- try {
- copyFileSync(path, backupPath);
- return backupPath;
- }
- catch (error) {
- const msg = error instanceof Error ? error.message : String(error);
- process.stderr.write(`[backupFile] Failed to backup ${path}: ${msg}\n`);
- return null;
- }
-}
-// ============================================================================
-// Marketplace Registration
-// ============================================================================
-/**
- * Ensure marketplace is registered in known_marketplaces.json
- */
-export async function ensureMarketplaceRegistered(installPath, claudeDir = join(homedir(), '.claude')) {
- const marketplacesFile = join(claudeDir, 'plugins', 'known_marketplaces.json');
- const marketplacesDir = join(claudeDir, 'plugins', 'marketplaces');
- const symlinkPath = join(marketplacesDir, 'pcircle-ai');
- // Ensure directories exist
- ensureDirectory(join(claudeDir, 'plugins'));
- // Read existing marketplaces or create new
- let marketplaces = readJSONFile(marketplacesFile) || {};
- // If file exists but parse failed, backup and start fresh
- if (existsSync(marketplacesFile) && marketplaces === null) {
- backupFile(marketplacesFile);
- marketplaces = {};
- }
- // Register or update pcircle-ai entry
- marketplaces['pcircle-ai'] = {
- source: {
- source: 'directory',
- path: installPath
- },
- installLocation: symlinkPath,
- lastUpdated: new Date().toISOString()
- };
- // Write back
- writeJSONFile(marketplacesFile, marketplaces);
-}
-// ============================================================================
-// Symlink Management
-// ============================================================================
-/**
- * Ensure symlink exists and points to correct location
- */
-export async function ensureSymlinkExists(installPath, marketplacesDir) {
- const symlinkPath = join(marketplacesDir, 'pcircle-ai');
- // Ensure marketplaces directory exists
- ensureDirectory(marketplacesDir);
- // Check if symlink exists
- if (existsSync(symlinkPath)) {
- try {
- // Check if it's a symlink
- const stats = lstatSync(symlinkPath);
- if (stats.isSymbolicLink()) {
- // Check if it points to the correct location
- const target = realpathSync(symlinkPath);
- const expectedTarget = realpathSync(installPath);
- if (target === expectedTarget) {
- // Symlink correct, nothing to do
- return;
- }
- // Points to wrong location, remove and recreate
- unlinkSync(symlinkPath);
- }
- else {
- // Not a symlink, remove it
- unlinkSync(symlinkPath);
- }
- }
- catch (error) {
- // Error reading symlink, try to remove and recreate
- try {
- unlinkSync(symlinkPath);
- }
- catch {
- // Ignore errors
- }
- }
- }
- // Create symlink
- try {
- symlinkSync(installPath, symlinkPath, 'dir');
- }
- catch (error) {
- throw new Error(`Failed to create symlink: ${error.message}`);
- }
-}
-// ============================================================================
-// Plugin Enablement
-// ============================================================================
-/**
- * Ensure plugin is enabled in settings.json
- */
-export async function ensurePluginEnabled(claudeDir = join(homedir(), '.claude')) {
- const settingsFile = join(claudeDir, 'settings.json');
- // Read existing settings or create new
- let settings = readJSONFile(settingsFile) || {};
- // Ensure enabledPlugins object exists
- if (!settings.enabledPlugins) {
- settings.enabledPlugins = {};
- }
- // Enable memesh plugin
- settings.enabledPlugins['memesh@pcircle-ai'] = true;
- // Write back
- writeJSONFile(settingsFile, settings);
-}
-// ============================================================================
-// Backward Compatibility
-// ============================================================================
-/**
- * Detect and fix legacy installations.
- * Only fixes marketplace, symlink, and plugin enablement.
- * MCP and hooks are handled by the plugin system via .mcp.json and hooks/hooks.json.
- */
-export async function detectAndFixLegacyInstall(installPath, claudeDir = join(homedir(), '.claude')) {
- const marketplacesFile = join(claudeDir, 'plugins', 'known_marketplaces.json');
- const symlinkPath = join(claudeDir, 'plugins', 'marketplaces', 'pcircle-ai');
- // Check if marketplace registered
- const marketplaces = readJSONFile(marketplacesFile);
- const hasMarketplace = marketplaces && marketplaces['pcircle-ai'];
- // Check if symlink exists
- const hasSymlink = existsSync(symlinkPath);
- // If everything is correct, return ok
- if (hasMarketplace && hasSymlink) {
- return 'ok';
- }
- // Legacy installation detected - fix it
- // Fix marketplace
- if (!hasMarketplace) {
- await ensureMarketplaceRegistered(installPath, claudeDir);
- }
- // Fix symlink
- if (!hasSymlink) {
- await ensureSymlinkExists(installPath, join(claudeDir, 'plugins', 'marketplaces'));
- }
- // Fix plugin enablement
- await ensurePluginEnabled(claudeDir);
- // Clean up legacy MCP config if it exists (plugin system handles MCP now)
- const mcpSettingsFile = join(claudeDir, 'mcp_settings.json');
- const mcpSettings = readJSONFile(mcpSettingsFile);
- if (mcpSettings?.mcpServers?.memesh || mcpSettings?.mcpServers?.['claude-code-buddy']) {
- delete mcpSettings.mcpServers.memesh;
- delete mcpSettings.mcpServers['claude-code-buddy'];
- writeJSONFile(mcpSettingsFile, mcpSettings);
- }
- return 'fixed';
-}
diff --git a/scripts/postinstall-lib.ts b/scripts/postinstall-lib.ts
deleted file mode 100644
index d070cf75..00000000
--- a/scripts/postinstall-lib.ts
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Postinstall Library - Core Functions
- *
- * Implements plugin installation logic with backward compatibility
- */
-
-import {
- existsSync,
- mkdirSync,
- readFileSync,
- writeFileSync,
- symlinkSync,
- unlinkSync,
- copyFileSync,
- lstatSync,
- realpathSync
-} from 'fs';
-import { join, dirname } from 'path';
-import { homedir } from 'os';
-
-// ============================================================================
-// Types
-// ============================================================================
-
-export type InstallMode = 'global' | 'local';
-
-export interface MarketplaceEntry {
- source: {
- source: 'directory' | 'github' | 'git' | 'url' | 'file';
- path?: string;
- repo?: string;
- url?: string;
- };
- installLocation: string;
- lastUpdated: string;
-}
-
-export interface KnownMarketplaces {
- [key: string]: MarketplaceEntry;
-}
-
-// ============================================================================
-// Environment Detection
-// ============================================================================
-
-/**
- * Detect install mode (global vs local dev)
- */
-export function detectInstallMode(installPath: string): InstallMode {
- // Check if path contains 'node_modules' - indicates global install
- if (installPath.includes('node_modules')) {
- return 'global';
- }
-
- // Check if in project root (has package.json, src/, etc.)
- const hasPackageJson = existsSync(join(installPath, 'package.json'));
- const hasSrcDir = existsSync(join(installPath, 'src'));
-
- if (hasPackageJson && hasSrcDir) {
- return 'local';
- }
-
- // Default to global
- return 'global';
-}
-
-/**
- * Get plugin install path based on mode
- * @param mode Install mode ('global' or 'local')
- * @param scriptDir Directory where this script is located (for global mode)
- */
-export function getPluginInstallPath(mode: InstallMode, scriptDir?: string): string {
- if (mode === 'local') {
- // Local dev: current working directory
- return process.cwd();
- }
-
- // Global: use provided scriptDir or fallback to cwd
- // For postinstall script, scriptDir will be the scripts/ directory
- if (scriptDir) {
- return dirname(scriptDir); // Parent of scripts/ directory
- }
-
- // Fallback: assume we're in the package root
- return process.cwd();
-}
-
-// ============================================================================
-// File Utilities
-// ============================================================================
-
-/**
- * Ensure directory exists
- */
-export function ensureDirectory(path: string): void {
- if (!existsSync(path)) {
- mkdirSync(path, { recursive: true });
- }
-}
-
-/**
- * Read JSON file safely
- */
-export function readJSONFile(path: string): T | null {
- try {
- if (!existsSync(path)) {
- return null;
- }
- const content = readFileSync(path, 'utf-8').trim();
- if (!content) {
- return null;
- }
- return JSON.parse(content) as T;
- } catch (error) {
- const msg = error instanceof Error ? error.message : String(error);
- process.stderr.write(`[readJSONFile] Failed to parse ${path}: ${msg}\n`);
- return null;
- }
-}
-
-/**
- * Write JSON file
- */
-export function writeJSONFile(path: string, data: unknown): void {
- ensureDirectory(dirname(path));
- writeFileSync(path, JSON.stringify(data, null, 2) + '\n', 'utf-8');
-}
-
-/**
- * Backup file with timestamp
- */
-export function backupFile(path: string): string | null {
- if (!existsSync(path)) {
- return null;
- }
-
- const backupPath = `${path}.backup-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
- try {
- copyFileSync(path, backupPath);
- return backupPath;
- } catch (error) {
- const msg = error instanceof Error ? error.message : String(error);
- process.stderr.write(`[backupFile] Failed to backup ${path}: ${msg}\n`);
- return null;
- }
-}
-
-// ============================================================================
-// Marketplace Registration
-// ============================================================================
-
-/**
- * Ensure marketplace is registered in known_marketplaces.json
- */
-export async function ensureMarketplaceRegistered(
- installPath: string,
- claudeDir: string = join(homedir(), '.claude')
-): Promise {
- const marketplacesFile = join(claudeDir, 'plugins', 'known_marketplaces.json');
- const marketplacesDir = join(claudeDir, 'plugins', 'marketplaces');
- const symlinkPath = join(marketplacesDir, 'pcircle-ai');
-
- // Ensure directories exist
- ensureDirectory(join(claudeDir, 'plugins'));
-
- // Read existing marketplaces or create new
- let marketplaces: KnownMarketplaces = readJSONFile(marketplacesFile) || {};
-
- // If file exists but parse failed, backup and start fresh
- if (existsSync(marketplacesFile) && marketplaces === null) {
- backupFile(marketplacesFile);
- marketplaces = {};
- }
-
- // Register or update pcircle-ai entry
- marketplaces['pcircle-ai'] = {
- source: {
- source: 'directory',
- path: installPath
- },
- installLocation: symlinkPath,
- lastUpdated: new Date().toISOString()
- };
-
- // Write back
- writeJSONFile(marketplacesFile, marketplaces);
-}
-
-// ============================================================================
-// Symlink Management
-// ============================================================================
-
-/**
- * Ensure symlink exists and points to correct location
- */
-export async function ensureSymlinkExists(
- installPath: string,
- marketplacesDir: string
-): Promise {
- const symlinkPath = join(marketplacesDir, 'pcircle-ai');
-
- // Ensure marketplaces directory exists
- ensureDirectory(marketplacesDir);
-
- // Check if symlink exists
- if (existsSync(symlinkPath)) {
- try {
- // Check if it's a symlink
- const stats = lstatSync(symlinkPath);
- if (stats.isSymbolicLink()) {
- // Check if it points to the correct location
- const target = realpathSync(symlinkPath);
- const expectedTarget = realpathSync(installPath);
-
- if (target === expectedTarget) {
- // Symlink correct, nothing to do
- return;
- }
-
- // Points to wrong location, remove and recreate
- unlinkSync(symlinkPath);
- } else {
- // Not a symlink, remove it
- unlinkSync(symlinkPath);
- }
- } catch (error) {
- // Error reading symlink, try to remove and recreate
- try {
- unlinkSync(symlinkPath);
- } catch {
- // Ignore errors
- }
- }
- }
-
- // Create symlink
- try {
- symlinkSync(installPath, symlinkPath, 'dir');
- } catch (error) {
- throw new Error(`Failed to create symlink: ${(error as Error).message}`);
- }
-}
-
-// ============================================================================
-// Plugin Enablement
-// ============================================================================
-
-/**
- * Ensure plugin is enabled in settings.json
- */
-export async function ensurePluginEnabled(
- claudeDir: string = join(homedir(), '.claude')
-): Promise {
- const settingsFile = join(claudeDir, 'settings.json');
-
- // Read existing settings or create new
- let settings: any = readJSONFile(settingsFile) || {};
-
- // Ensure enabledPlugins object exists
- if (!settings.enabledPlugins) {
- settings.enabledPlugins = {};
- }
-
- // Enable memesh plugin
- settings.enabledPlugins['memesh@pcircle-ai'] = true;
-
- // Write back
- writeJSONFile(settingsFile, settings);
-}
-
-// ============================================================================
-// Backward Compatibility
-// ============================================================================
-
-/**
- * Detect and fix legacy installations.
- * Only fixes marketplace, symlink, and plugin enablement.
- * MCP and hooks are handled by the plugin system via .mcp.json and hooks/hooks.json.
- */
-export async function detectAndFixLegacyInstall(
- installPath: string,
- claudeDir: string = join(homedir(), '.claude')
-): Promise<'ok' | 'fixed'> {
- const marketplacesFile = join(claudeDir, 'plugins', 'known_marketplaces.json');
- const symlinkPath = join(claudeDir, 'plugins', 'marketplaces', 'pcircle-ai');
-
- // Check if marketplace registered
- const marketplaces = readJSONFile(marketplacesFile);
- const hasMarketplace = marketplaces && marketplaces['pcircle-ai'];
-
- // Check if symlink exists
- const hasSymlink = existsSync(symlinkPath);
-
- // If everything is correct, return ok
- if (hasMarketplace && hasSymlink) {
- return 'ok';
- }
-
- // Legacy installation detected - fix it
- // Fix marketplace
- if (!hasMarketplace) {
- await ensureMarketplaceRegistered(installPath, claudeDir);
- }
-
- // Fix symlink
- if (!hasSymlink) {
- await ensureSymlinkExists(installPath, join(claudeDir, 'plugins', 'marketplaces'));
- }
-
- // Fix plugin enablement
- await ensurePluginEnabled(claudeDir);
-
- // Clean up legacy MCP config if it exists (plugin system handles MCP now)
- const mcpSettingsFile = join(claudeDir, 'mcp_settings.json');
- const mcpSettings = readJSONFile(mcpSettingsFile);
- if (mcpSettings?.mcpServers?.memesh || mcpSettings?.mcpServers?.['claude-code-buddy']) {
- delete mcpSettings.mcpServers.memesh;
- delete mcpSettings.mcpServers['claude-code-buddy'];
- writeJSONFile(mcpSettingsFile, mcpSettings);
- }
-
- return 'fixed';
-}
diff --git a/scripts/postinstall-new.js b/scripts/postinstall-new.js
deleted file mode 100644
index d64cbc5c..00000000
--- a/scripts/postinstall-new.js
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env node
-/**
- * Post-install script for MeMesh Plugin
- *
- * Following official Claude Code Plugin spec:
- * - MCP servers: handled by plugin system via .mcp.json
- * - Hooks: handled by plugin system via hooks/hooks.json
- * - Skills: auto-discovered by plugin system from skills/
- *
- * This script only does:
- * 1. Detect install mode (global/local)
- * 2. Register marketplace
- * 3. Create symlink
- * 4. Enable plugin
- * 5. Fix legacy installations
- */
-
-import { fileURLToPath } from 'url';
-import { dirname, join } from 'path';
-import { homedir } from 'os';
-import chalk from 'chalk';
-import boxen from 'boxen';
-
-// Get directory path in ESM
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-
-// Import functions from postinstall-lib.js
-import {
- detectInstallMode,
- getPluginInstallPath,
- ensureMarketplaceRegistered,
- ensureSymlinkExists,
- ensurePluginEnabled,
- detectAndFixLegacyInstall
-} from './postinstall-lib.js';
-
-// ============================================================================
-// Main Installation Flow
-// ============================================================================
-
-async function main() {
- console.log(chalk.cyan('\n🚀 MeMesh Plugin Installation Starting...\n'));
-
- const results = {
- mode: null,
- installPath: null,
- marketplace: false,
- symlink: false,
- pluginEnabled: false,
- legacyFixed: null,
- errors: []
- };
-
- try {
- // Step 1: Detect install mode
- // Pass __dirname to getPluginInstallPath for global mode detection
- const installPath = getPluginInstallPath('global', __dirname);
- const mode = detectInstallMode(installPath);
- results.mode = mode;
- results.installPath = installPath;
-
- console.log(chalk.dim(` Mode: ${mode}`));
- console.log(chalk.dim(` Path: ${installPath}\n`));
-
- const claudeDir = join(homedir(), '.claude');
- const marketplacesDir = join(claudeDir, 'plugins', 'marketplaces');
-
- // Step 2: Marketplace Registration
- try {
- await ensureMarketplaceRegistered(installPath, claudeDir);
- results.marketplace = true;
- console.log(chalk.green(' ✅ Marketplace registered'));
- } catch (error) {
- results.errors.push(`Marketplace: ${error.message}`);
- console.log(chalk.yellow(` ⚠️ Marketplace registration failed (non-fatal)`));
- }
-
- // Step 3: Symlink Creation
- try {
- await ensureSymlinkExists(installPath, marketplacesDir);
- results.symlink = true;
- console.log(chalk.green(' ✅ Symlink created'));
- } catch (error) {
- results.errors.push(`Symlink: ${error.message}`);
- console.log(chalk.yellow(` ⚠️ Symlink creation failed (non-fatal)`));
- }
-
- // Step 4: Plugin Enablement
- try {
- await ensurePluginEnabled(claudeDir);
- results.pluginEnabled = true;
- console.log(chalk.green(' ✅ Plugin enabled'));
- } catch (error) {
- results.errors.push(`Plugin Enable: ${error.message}`);
- console.log(chalk.yellow(` ⚠️ Plugin enablement failed (non-fatal)`));
- }
-
- // Step 5: Legacy Installation Fix
- try {
- const legacyStatus = await detectAndFixLegacyInstall(installPath, claudeDir);
- results.legacyFixed = legacyStatus;
-
- if (legacyStatus === 'fixed') {
- console.log(chalk.green(' ✅ Legacy installation upgraded'));
- } else if (legacyStatus === 'ok') {
- console.log(chalk.dim(' ℹ️ No legacy issues detected'));
- }
- } catch (error) {
- console.log(chalk.dim(' ℹ️ Legacy check skipped'));
- }
-
- } catch (error) {
- console.error(chalk.red('\n❌ Installation failed:'), error.message);
- console.error(chalk.yellow('\n💡 You can configure manually (see instructions below)\n'));
- }
-
- // ============================================================================
- // Display Installation Summary
- // ============================================================================
-
- const allSuccess = results.marketplace && results.symlink && results.pluginEnabled;
-
- const statusIcon = allSuccess ? '✅' : (results.errors.length > 0 ? '⚠️' : '✅');
- const statusText = allSuccess
- ? chalk.green('Installation Complete!')
- : chalk.yellow('Installation completed with warnings');
-
- const message = `
-${chalk.bold(statusIcon + ' ' + statusText)}
-
-${chalk.bold('Installation Summary:')}
- ${results.marketplace ? '✅' : '⚠️'} Marketplace: ${results.marketplace ? 'Registered' : 'Failed'}
- ${results.symlink ? '✅' : '⚠️'} Symlink: ${results.symlink ? 'Created' : 'Failed'}
- ${results.pluginEnabled ? '✅' : '⚠️'} Plugin: ${results.pluginEnabled ? 'Enabled' : 'Failed'}
-
-${chalk.bold('Plugin Components (auto-managed by Claude Code):')}
- ${chalk.cyan('•')} MCP Server: 8 tools (persistent memory, semantic search, task routing)
- ${chalk.cyan('•')} Hooks: 6 auto-hooks (session recall, commit tracking, smart routing)
- ${chalk.cyan('•')} Skills: Comprehensive code review
- ${chalk.cyan('•')} Vector semantic search with ONNX embeddings (runs 100% locally)
- ${chalk.cyan('•')} Auto-relation inference in knowledge graph
-
-${chalk.bold('Next Steps:')}
- ${chalk.yellow('1.')} ${chalk.bold('Restart Claude Code')}
- Completely quit and reopen to load the plugin
-
- ${chalk.yellow('2.')} ${chalk.bold('Verify Installation')}
- Run: ${chalk.cyan('node scripts/health-check.js')}
-
- ${chalk.yellow('3.')} ${chalk.bold('Test MeMesh Tools')}
- Ask Claude: ${chalk.italic('"List available MeMesh tools"')}
-
-${chalk.bold('Documentation:')}
- ${chalk.cyan('•')} User Guide: ${chalk.underline('https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/USER_GUIDE.md')}
- ${chalk.cyan('•')} Commands: ${chalk.underline('https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/COMMANDS.md')}
-
-${results.errors.length > 0 ? chalk.yellow('\n⚠️ Warnings:\n ' + results.errors.join('\n ')) : ''}
-
-${chalk.dim('Need help? Open an issue: https://github.com/PCIRCLE-AI/claude-code-buddy/issues')}
-`;
-
- console.log(
- boxen(message, {
- padding: 1,
- margin: 1,
- borderStyle: 'round',
- borderColor: allSuccess ? 'green' : 'yellow',
- })
- );
-
- // Exit with appropriate code
- // Critical failures (plugin registration) → exit 1
- // Non-critical failures (symlink already exists) → exit 0
- const hasCriticalFailure = !results.marketplace && !results.symlink;
- const hasPluginFailure = !results.pluginEnabled;
- if (hasCriticalFailure || hasPluginFailure) {
- process.exit(1);
- }
- process.exit(0);
-}
-
-// ============================================================================
-// Execute
-// ============================================================================
-
-main().catch((error) => {
- console.error(chalk.red('\n💥 Fatal error during installation:'));
- console.error(error);
- process.exit(1);
-});
diff --git a/scripts/postinstall.js b/scripts/postinstall.js
deleted file mode 100755
index 7534eb58..00000000
--- a/scripts/postinstall.js
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env node
-/**
- * Post-install script for MeMesh
- *
- * 1. Configures ~/.claude/mcp_settings.json (auto-registers MCP server)
- * 2. Displays installation guide
- */
-
-import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
-import { homedir } from 'os';
-import chalk from 'chalk';
-import boxen from 'boxen';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const projectRoot = process.cwd(); // npm install 時的目錄
-
-// ============================================================================
-// Step 1: Configure ~/.claude/mcp_settings.json
-// ============================================================================
-let mcpConfigured = false;
-let mcpConfigPath = '';
-
-try {
- const claudeDir = join(homedir(), '.claude');
- mcpConfigPath = join(claudeDir, 'mcp_settings.json');
-
- // Determine installation mode
- // IMPORTANT: Only configure MCP for npm installations, NOT for local development
- const isGlobalInstall = projectRoot.includes('node_modules');
-
- if (!isGlobalInstall) {
- // Skip MCP configuration in development environment
- // Reason: Prevents writing local development paths to user's mcp_settings.json
- // which would cause users to run outdated code even after npm updates
- console.log('⚠️ Skipping MCP configuration (local development mode)');
- console.log(' To test locally, manually configure mcp_settings.json');
- mcpConfigured = false;
- } else {
- // This is an npm installation - proceed with MCP configuration
- mcpConfigured = true;
- }
-
- // Create ~/.claude directory (recursive: true handles existing directory safely, avoids TOCTOU race condition)
- mkdirSync(claudeDir, { recursive: true });
-
- // Read existing config or create new one
- let mcpConfig = { mcpServers: {} };
- if (existsSync(mcpConfigPath)) {
- try {
- const existingContent = readFileSync(mcpConfigPath, 'utf-8').trim();
- if (existingContent) {
- mcpConfig = JSON.parse(existingContent);
- if (!mcpConfig.mcpServers) {
- mcpConfig.mcpServers = {};
- }
- }
- } catch (e) {
- // If parsing fails, start fresh but don't overwrite completely
- mcpConfig = { mcpServers: {} };
- }
- }
-
- // Configure memesh entry (only for npm installations)
- if (mcpConfigured) {
- mcpConfig.mcpServers.memesh = {
- command: 'npx',
- args: ['-y', '@pcircle/memesh'],
- env: {
- NODE_ENV: 'production'
- }
- };
- }
-
- // Write config only if we configured MCP
- if (mcpConfigured) {
- // Remove legacy entry if exists
- if (mcpConfig.mcpServers['claude-code-buddy']) {
- delete mcpConfig.mcpServers['claude-code-buddy'];
- }
-
- // Write config (directory already ensured above with mkdirSync recursive)
- writeFileSync(mcpConfigPath, JSON.stringify(mcpConfig, null, 2) + '\n', 'utf-8');
- }
-} catch (error) {
- // Non-fatal: user can configure manually
- console.warn(chalk.yellow(`⚠️ Could not auto-configure MCP settings: ${error.message}`));
- console.warn(chalk.yellow(' You can configure manually (see instructions below)'));
-}
-
-// ============================================================================
-// Step 2: Display Installation Message
-// ============================================================================
-const mcpStatusIcon = mcpConfigured ? '✅' : '⚠️';
-const mcpStatusText = mcpConfigured
- ? chalk.green(`Auto-configured at ${mcpConfigPath}`)
- : chalk.yellow('Manual configuration required (see below)');
-
-// Build the message based on configuration status
-const configSection = mcpConfigured
- ? `${chalk.bold('MCP Configuration:')}
- ${mcpStatusIcon} ${mcpStatusText}
- ${chalk.dim('MeMesh is ready to use! Just restart Claude Code.')}
-
-${chalk.bold('Quick Start (2 Steps):')}
-
- ${chalk.yellow('1.')} ${chalk.bold('Restart Claude Code')}
- Completely quit and reopen to load the MCP server
-
- ${chalk.yellow('2.')} ${chalk.bold('Test Connection')}
- Ask: ${chalk.italic('"List available MeMesh tools"')}`
- : `${chalk.bold('MCP Configuration:')}
- ${mcpStatusIcon} ${mcpStatusText}
-
-${chalk.bold('Quick Start (3 Steps):')}
-
- ${chalk.yellow('1.')} ${chalk.bold('Configure MCP Client')}
- Add to ~/.claude/mcp_settings.json (see below)
-
- ${chalk.yellow('2.')} ${chalk.bold('Restart Claude Code')}
- Completely quit and reopen to load the MCP server
-
- ${chalk.yellow('3.')} ${chalk.bold('Test Connection')}
- Ask: ${chalk.italic('"List available MeMesh tools"')}
-
-${chalk.bold('Manual Configuration:')}
-
-${chalk.dim('Add to ~/.claude/mcp_settings.json:')}
-
- {
- ${chalk.cyan('"mcpServers"')}: {
- ${chalk.cyan('"memesh"')}: {
- ${chalk.cyan('"command"')}: ${chalk.green('"npx"')},
- ${chalk.cyan('"args"')}: [${chalk.green('"-y"')}, ${chalk.green('"@pcircle/memesh"')}]
- }
- }
- }`;
-
-const message = `
-${chalk.bold.green('✅ MeMesh Installed Successfully!')}
-
-${chalk.bold('What You Got:')}
- ${chalk.cyan('•')} 8 MCP tools (persistent memory, semantic search, task routing, cloud sync)
- ${chalk.cyan('•')} Vector semantic search with ONNX embeddings (runs 100% locally)
- ${chalk.cyan('•')} Auto-memory with smart knowledge graph
- ${chalk.cyan('•')} Local-first architecture (all data stored locally)
-
-${configSection}
-
-${chalk.bold('Documentation:')}
- ${chalk.cyan('•')} Setup Guide: ${chalk.underline('https://github.com/PCIRCLE-AI/claude-code-buddy#installation')}
- ${chalk.cyan('•')} User Guide: ${chalk.underline('https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/USER_GUIDE.md')}
- ${chalk.cyan('•')} Commands: ${chalk.underline('https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/COMMANDS.md')}
-
-${chalk.dim('Need help? Open an issue: https://github.com/PCIRCLE-AI/claude-code-buddy/issues')}
-`;
-
-console.log(
- boxen(message, {
- padding: 1,
- margin: 1,
- borderStyle: 'round',
- borderColor: 'cyan',
- })
-);
diff --git a/scripts/pre-deployment-check.sh b/scripts/pre-deployment-check.sh
deleted file mode 100755
index a762ee68..00000000
--- a/scripts/pre-deployment-check.sh
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/bin/bash
-
-# MeMesh Plugin Pre-Deployment Check Script (Comprehensive Edition)
-# 全面檢查所有可能導致發布問題的情況
-
-set -e
-
-echo "🚀 MeMesh Plugin Pre-Deployment Check (Comprehensive)"
-echo "========================================================"
-echo ""
-
-FAILED_CHECKS=0
-TOTAL_CHECKS=0
-WARNINGS=0
-
-check_pass() {
- echo " ✅ $1"
-}
-
-check_fail() {
- echo " ❌ $1"
- FAILED_CHECKS=$((FAILED_CHECKS + 1))
-}
-
-check_warn() {
- echo " ⚠️ $1"
- WARNINGS=$((WARNINGS + 1))
-}
-
-run_check() {
- TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
- echo ""
- echo "[$TOTAL_CHECKS] $1"
-}
-
-# ============================================================================
-# Part 1: 核心檔案存在性檢查
-# ============================================================================
-run_check "核心檔案存在性"
-test -f package.json && check_pass "package.json exists" || check_fail "package.json missing"
-test -f plugin.json && check_pass "plugin.json exists" || check_fail "plugin.json missing"
-test -f mcp.json && check_pass "mcp.json exists" || check_fail "mcp.json missing"
-test -f README.md && check_pass "README.md exists" || check_fail "README.md missing"
-test -f LICENSE && check_pass "LICENSE exists" || check_fail "LICENSE missing"
-test -f CHANGELOG.md && check_pass "CHANGELOG.md exists" || check_warn "CHANGELOG.md missing"
-test -f tsconfig.json && check_pass "tsconfig.json exists" || check_fail "tsconfig.json missing"
-
-# ============================================================================
-# Part 2: package.json 完整性檢查
-# ============================================================================
-run_check "package.json 完整性"
-node -e "
-const pkg = require('./package.json');
-const errors = [];
-
-// Basic fields
-if (pkg.name !== '@pcircle/memesh') errors.push('name must be @pcircle/memesh');
-if (!pkg.version) errors.push('version missing');
-if (!pkg.description) errors.push('description missing');
-if (!pkg.author) errors.push('author missing');
-if (!pkg.license) errors.push('license missing');
-if (pkg.main !== 'dist/index.js') errors.push('main must be dist/index.js');
-
-// Bin
-if (!pkg.bin || !pkg.bin.memesh) errors.push('bin.memesh missing');
-
-// Files array (CRITICAL - prevents v2.9.0 style regressions)
-if (!pkg.files) {
- errors.push('CRITICAL: files array missing');
-} else {
- const required = ['dist/', 'scripts/postinstall-new.js', 'scripts/postinstall-lib.js',
- 'scripts/skills/', 'scripts/hooks/', 'plugin.json', 'mcp.json', 'LICENSE'];
- required.forEach(f => {
- if (!pkg.files.includes(f)) {
- errors.push(\`CRITICAL: files array missing \${f}\`);
- }
- });
-}
-
-// Scripts
-const requiredScripts = ['build', 'test', 'postinstall', 'prepare:plugin'];
-requiredScripts.forEach(s => {
- if (!pkg.scripts || !pkg.scripts[s]) {
- errors.push(\`script '\${s}' missing\`);
- }
-});
-
-// Dependencies
-if (!pkg.dependencies) errors.push('dependencies missing');
-if (!pkg.devDependencies) errors.push('devDependencies missing');
-
-if (errors.length > 0) {
- errors.forEach(e => console.error(' - ' + e));
- process.exit(1);
-}
-" && check_pass "package.json 完整且正確" || check_fail "package.json 有錯誤"
-
-# ============================================================================
-# Part 3: 版本一致性檢查(跨所有文件)
-# ============================================================================
-run_check "版本號一致性(所有文件)"
-node -e "
-const fs = require('fs');
-const versions = {};
-
-// Collect versions from all sources
-versions.package = require('./package.json').version;
-versions.plugin = require('./plugin.json').version;
-
-// Check CHANGELOG
-const changelog = fs.readFileSync('CHANGELOG.md', 'utf8');
-const changelogMatch = changelog.match(/^## \\[(\\d+\\.\\d+\\.\\d+)\\]/m);
-if (changelogMatch) {
- versions.changelog = changelogMatch[1];
-} else {
- console.error('CHANGELOG.md 沒有版本號');
- process.exit(1);
-}
-
-// Check if all plugin files exist
-if (fs.existsSync('.claude-plugin/memesh/package.json')) {
- versions.pluginPackage = require('./.claude-plugin/memesh/package.json').version;
-}
-if (fs.existsSync('.claude-plugin/memesh/.claude-plugin/plugin.json')) {
- versions.pluginManifest = require('./.claude-plugin/memesh/.claude-plugin/plugin.json').version;
-}
-
-// All versions must match
-const unique = new Set(Object.values(versions));
-if (unique.size !== 1) {
- console.error('版本不一致:');
- Object.entries(versions).forEach(([k, v]) => console.error(\` \${k}: \${v}\`));
- process.exit(1);
-}
-
-console.log(' 所有版本一致: v' + versions.package);
-" && check_pass "所有版本號一致" || check_fail "版本號不一致"
-
-# ============================================================================
-# Part 4: TypeScript 編譯檢查
-# ============================================================================
-run_check "TypeScript 類型檢查"
-npm run typecheck > /dev/null 2>&1 && check_pass "類型檢查通過" || check_fail "類型檢查失敗"
-
-run_check "TypeScript 編譯"
-npm run build > /dev/null 2>&1 && check_pass "編譯成功" || check_fail "編譯失敗"
-
-run_check "dist/ 目錄完整性"
-REQUIRED_DIST=(
- "dist/index.js"
- "dist/mcp/server-bootstrap.js"
- "dist/mcp/daemon/DaemonBootstrap.js"
- "dist/mcp/daemon/DaemonLockManager.js"
- "dist/utils/PathResolver.js"
-)
-for file in "${REQUIRED_DIST[@]}"; do
- if [ -f "$file" ]; then
- check_pass "$file exists"
- else
- check_fail "$file missing"
- fi
-done
-
-# ============================================================================
-# Part 5: Plugin 結構同步檢查
-# ============================================================================
-run_check "Plugin sync"
-npm run prepare:plugin > /dev/null 2>&1 && check_pass "Plugin sync 成功" || check_fail "Plugin sync 失敗"
-
-run_check "Plugin 目錄結構"
-REQUIRED_PLUGIN_FILES=(
- ".claude-plugin/memesh/.claude-plugin/plugin.json"
- ".claude-plugin/memesh/.mcp.json"
- ".claude-plugin/memesh/package.json"
- ".claude-plugin/memesh/dist/mcp/server-bootstrap.js"
-)
-for file in "${REQUIRED_PLUGIN_FILES[@]}"; do
- if [ -f "$file" ]; then
- check_pass "$(basename $file) exists"
- else
- check_fail "$(basename $file) missing"
- fi
-done
-
-run_check "檔案內容同步檢驗 (Root ↔ Plugin)"
-node -e "
-const fs = require('fs');
-const crypto = require('crypto');
-
-// Critical files that MUST be in sync
-const syncPairs = [
- // Configuration files
- {
- root: 'plugin.json',
- plugin: '.claude-plugin/memesh/.claude-plugin/plugin.json',
- critical: true
- },
- {
- root: 'mcp.json',
- plugin: '.claude-plugin/memesh/.mcp.json',
- critical: true
- },
- {
- root: 'package.json',
- plugin: '.claude-plugin/memesh/package.json',
- critical: true
- },
- // Compiled dist files
- {
- root: 'dist/mcp/server-bootstrap.js',
- plugin: '.claude-plugin/memesh/dist/mcp/server-bootstrap.js',
- critical: true
- },
- {
- root: 'dist/index.js',
- plugin: '.claude-plugin/memesh/dist/index.js',
- critical: true
- },
- {
- root: 'dist/mcp/daemon/DaemonBootstrap.js',
- plugin: '.claude-plugin/memesh/dist/mcp/daemon/DaemonBootstrap.js',
- critical: true
- },
- // Postinstall scripts (if copied to plugin)
- {
- root: 'scripts/postinstall-lib.js',
- plugin: '.claude-plugin/memesh/scripts/postinstall-lib.js',
- critical: false // May not exist in plugin, non-fatal
- }
-];
-
-const errors = [];
-const warnings = [];
-
-syncPairs.forEach(pair => {
- const rootExists = fs.existsSync(pair.root);
- const pluginExists = fs.existsSync(pair.plugin);
-
- if (!rootExists) {
- errors.push(\`Root \${pair.root} missing\`);
- return;
- }
-
- if (!pluginExists) {
- if (pair.critical) {
- errors.push(\`Plugin \${pair.plugin} missing\`);
- } else {
- warnings.push(\`Plugin \${pair.plugin} missing (non-critical)\`);
- }
- return;
- }
-
- // Compare content hashes
- const rootHash = crypto.createHash('md5').update(fs.readFileSync(pair.root)).digest('hex');
- const pluginHash = crypto.createHash('md5').update(fs.readFileSync(pair.plugin)).digest('hex');
-
- if (rootHash !== pluginHash) {
- const msg = \`\${pair.root} ↔ \${pair.plugin} content mismatch\`;
- if (pair.critical) {
- errors.push(msg);
- } else {
- warnings.push(msg + ' (non-critical)');
- }
- }
-});
-
-if (errors.length > 0) {
- console.error(' ❌ Critical sync errors:');
- errors.forEach(e => console.error(' - ' + e));
- process.exit(1);
-}
-
-if (warnings.length > 0) {
- console.warn(' ⚠️ Non-critical warnings:');
- warnings.forEach(w => console.warn(' - ' + w));
-}
-
-console.log(' ✅ All critical files synchronized');
-" && check_pass "檔案內容同步檢驗通過" || check_fail "❌ CRITICAL: 檔案同步失敗"
-
-# ============================================================================
-# Part 6: npm pack 完整性檢查
-# ============================================================================
-run_check "npm pack 打包"
-npm pack > /dev/null 2>&1 && check_pass "打包成功" || check_fail "打包失敗"
-
-TARBALL=$(ls -t pcircle-memesh-*.tgz | head -1)
-if [ ! -f "$TARBALL" ]; then
- check_fail "找不到 tarball"
-else
- run_check "Tarball 內容完整性檢查"
-
- # Critical files
- REQUIRED_IN_TARBALL=(
- "package/package.json"
- "package/plugin.json"
- "package/mcp.json"
- "package/LICENSE"
- "package/README.md"
- "package/dist/index.js"
- "package/dist/mcp/server-bootstrap.js"
- "package/scripts/postinstall-new.js"
- "package/scripts/postinstall-lib.js"
- "package/scripts/hooks/hook-utils.js"
- "package/scripts/hooks/session-start.js"
- "package/scripts/hooks/stop.js"
- "package/scripts/hooks/post-commit.js"
- "package/scripts/hooks/post-tool-use.js"
- )
-
- for file in "${REQUIRED_IN_TARBALL[@]}"; do
- if tar -tzf "$TARBALL" | grep -q "$file"; then
- check_pass "$(echo $file | sed 's/package\///')"
- else
- check_fail "$(echo $file | sed 's/package\///') 不在 tarball 中"
- fi
- done
-
- run_check "Skills 完整性檢查"
- if tar -tzf "$TARBALL" | grep -q "package/scripts/skills/"; then
- check_pass "scripts/skills/ 包含在 tarball 中"
-
- # Count skills
- SKILL_COUNT=$(tar -tzf "$TARBALL" | grep "package/scripts/skills/.*SKILL.md" | wc -l | tr -d ' ')
- if [ "$SKILL_COUNT" -gt 0 ]; then
- check_pass "找到 $SKILL_COUNT 個 bundled skills"
- else
- check_warn "沒有找到 bundled skills"
- fi
- else
- check_fail "scripts/skills/ 不在 tarball 中"
- fi
-
- run_check "Hook-utils.js 內容驗證"
- # Extract and verify (simplified - use direct file check)
- if tar -tzf "$TARBALL" 2>/dev/null | grep -q "package/scripts/hooks/hook-utils.js"; then
- check_pass "hook-utils.js 存在於 tarball"
- # Quick content check from local file (tarball content is same as local after build)
- if grep -q "resolveMemeshDbPath" scripts/hooks/hook-utils.js; then
- check_pass "resolveMemeshDbPath() 函數存在"
- else
- check_fail "❌ CRITICAL: resolveMemeshDbPath() 函數缺失"
- fi
- else
- check_fail "❌ CRITICAL: hook-utils.js 不在 tarball 中"
- fi
-
- run_check "Tarball 大小合理性"
- TARBALL_SIZE=$(stat -f%z "$TARBALL" 2>/dev/null || stat -c%s "$TARBALL" 2>/dev/null)
- TARBALL_SIZE_MB=$((TARBALL_SIZE / 1024 / 1024))
-
- if [ "$TARBALL_SIZE_MB" -lt 50 ]; then
- check_pass "Tarball 大小: ${TARBALL_SIZE_MB}MB (合理)"
- else
- check_warn "Tarball 大小: ${TARBALL_SIZE_MB}MB (過大,檢查是否包含不必要的文件)"
- fi
-
- # Clean up
- rm "$TARBALL"
-fi
-
-# ============================================================================
-# Part 7: 配置文件正確性檢查
-# ============================================================================
-run_check "mcp.json 配置正確性"
-node -e "
-const mcp = require('./mcp.json');
-const errors = [];
-
-if (!mcp.memesh) errors.push('memesh server config missing');
-if (!mcp.memesh.command) errors.push('command missing');
-if (!mcp.memesh.args) errors.push('args missing');
-
-// Must use CLAUDE_PLUGIN_ROOT variable
-if (!mcp.memesh.args[0].includes('CLAUDE_PLUGIN_ROOT')) {
- errors.push('Must use \${CLAUDE_PLUGIN_ROOT} variable (not absolute path)');
-}
-
-if (errors.length > 0) {
- errors.forEach(e => console.error(' - ' + e));
- process.exit(1);
-}
-" && check_pass "mcp.json 配置正確" || check_fail "mcp.json 配置錯誤"
-
-run_check "plugin.json 配置正確性"
-node -e "
-const plugin = require('./plugin.json');
-const errors = [];
-
-if (!plugin.name) errors.push('name missing');
-if (!plugin.version) errors.push('version missing');
-if (!plugin.author) errors.push('author missing');
-
-// Must NOT contain mcpServers (should be in separate mcp.json)
-if (plugin.mcpServers) {
- errors.push('plugin.json should not contain mcpServers (use mcp.json instead)');
-}
-
-if (errors.length > 0) {
- errors.forEach(e => console.error(' - ' + e));
- process.exit(1);
-}
-" && check_pass "plugin.json 配置正確" || check_fail "plugin.json 配置錯誤"
-
-# ============================================================================
-# Part 8: 安全性檢查
-# ============================================================================
-run_check "敏感資訊檢查"
-SENSITIVE_FILES=(".env" ".env.local" "credentials.json" ".secret" "private.key")
-for file in "${SENSITIVE_FILES[@]}"; do
- if [ -f "$file" ]; then
- if git ls-files --error-unmatch "$file" > /dev/null 2>&1; then
- check_fail "$file 被加入 git(安全風險)"
- else
- check_pass "$file 存在但未加入 git"
- fi
- fi
-done
-
-run_check "依賴安全性檢查"
-if npm audit --audit-level=high > /dev/null 2>&1; then
- check_pass "沒有高危漏洞"
-else
- check_warn "發現依賴漏洞,執行 npm audit 查看詳情"
-fi
-
-# ============================================================================
-# Part 9: Hooks 系統檢查
-# ============================================================================
-run_check "Hooks 文件完整性"
-HOOK_FILES=(
- "scripts/hooks/hook-utils.js"
- "scripts/hooks/session-start.js"
- "scripts/hooks/stop.js"
- "scripts/hooks/post-commit.js"
- "scripts/hooks/post-tool-use.js"
- "scripts/hooks/pre-tool-use.js"
-)
-for file in "${HOOK_FILES[@]}"; do
- if [ -f "$file" ]; then
- check_pass "$(basename $file) exists"
-
- # Check if file is executable (should be for hooks)
- if [ -x "$file" ]; then
- check_pass "$(basename $file) is executable"
- else
- check_warn "$(basename $file) not executable (may cause issues)"
- fi
- else
- check_fail "$(basename $file) missing"
- fi
-done
-
-# ============================================================================
-# Part 10: MCP Server 功能測試
-# ============================================================================
-run_check "MCP Server 可執行性"
-if [ -f ".claude-plugin/memesh/dist/mcp/server-bootstrap.js" ]; then
- # Test if server can start (with timeout)
- if timeout 2s node .claude-plugin/memesh/dist/mcp/server-bootstrap.js --version > /dev/null 2>&1; then
- check_pass "MCP server 可以啟動"
- else
- # Timeout is expected for stdio mode
- check_pass "MCP server 響應 (stdio mode timeout 正常)"
- fi
-else
- check_fail "server-bootstrap.js 不存在"
-fi
-
-# ============================================================================
-# Part 11: 測試覆蓋
-# ============================================================================
-run_check "單元測試"
-# Known issue: Tests pass but vitest crashes with Segmentation fault: 11 due to native module (sqlite-vec, better-sqlite3)
-# This is intermittent and tests pass when run individually. Mark as warning instead of fatal.
-if npm test > /dev/null 2>&1; then
- check_pass "所有測試通過"
-else
- TEST_EXIT_CODE=$?
- if [ $TEST_EXIT_CODE -eq 139 ] || [ $TEST_EXIT_CODE -eq 11 ]; then
- check_warn "測試 Segmentation fault (已知 native module 問題,非致命)"
- else
- check_fail "測試失敗 (exit code: $TEST_EXIT_CODE)"
- fi
-fi
-
-run_check "測試覆蓋率"
-if [ -d "coverage" ]; then
- check_pass "測試覆蓋率報告已生成"
-else
- check_warn "沒有測試覆蓋率報告"
-fi
-
-# ============================================================================
-# Part 12: Lint 檢查
-# ============================================================================
-run_check "代碼風格檢查"
-if npm run lint > /dev/null 2>&1; then
- check_pass "Lint 檢查通過"
-else
- check_warn "Lint 檢查有警告"
-fi
-
-# ============================================================================
-# Part 13: Git 狀態檢查
-# ============================================================================
-run_check "Git 狀態"
-if [ -z "$(git status --porcelain)" ]; then
- check_pass "工作目錄乾淨"
-else
- check_warn "有未提交的變更"
- git status --short
-fi
-
-# ============================================================================
-# 最終總結
-# ============================================================================
-echo ""
-echo "========================================================"
-echo "📊 檢查結果總結"
-echo "========================================================"
-echo "總檢查項目: $TOTAL_CHECKS"
-echo "通過: $((TOTAL_CHECKS - FAILED_CHECKS - WARNINGS))"
-echo "警告: $WARNINGS"
-echo "失敗: $FAILED_CHECKS"
-echo ""
-
-if [ $FAILED_CHECKS -eq 0 ]; then
- if [ $WARNINGS -eq 0 ]; then
- echo "✅ 完美!所有檢查通過,沒有警告!"
- else
- echo "✅ 所有關鍵檢查通過(有 $WARNINGS 個警告)"
- fi
- echo ""
- echo "📝 準備發布:"
- echo " 1. 確認 CHANGELOG.md 已更新"
- echo " 2. 執行 ./scripts/release.sh [patch|minor|major]"
- echo " 3. 或手動執行 npm publish"
- exit 0
-else
- echo "❌ 有 $FAILED_CHECKS 項關鍵檢查失敗"
- echo "請修正所有失敗項目後再嘗試發布"
- exit 1
-fi
diff --git a/scripts/prepare-plugin.js b/scripts/prepare-plugin.js
deleted file mode 100755
index fc0d90fa..00000000
--- a/scripts/prepare-plugin.js
+++ /dev/null
@@ -1,395 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * Prepare Plugin Directory for Claude Code Installation
- *
- * Following superpowers plugin structure:
- * .claude-plugin/memesh/
- * ├── .claude-plugin/
- * │ └── plugin.json ← Plugin metadata
- * ├── dist/ ← Build output
- * ├── node_modules/ ← Dependencies
- * ├── package.json
- * └── scripts/
- */
-
-import { copyFileSync, cpSync, existsSync, mkdirSync, readFileSync, writeFileSync, symlinkSync, unlinkSync, statSync } from 'fs';
-import { execSync } from 'child_process';
-import { join, dirname, normalize, relative, isAbsolute } from 'path';
-import { fileURLToPath } from 'url';
-import { homedir } from 'os';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const projectRoot = join(__dirname, '..');
-
-/**
- * Validate that a resolved path stays within an expected parent directory.
- * Prevents path traversal attacks via ../ components.
- */
-function validatePathWithinParent(targetPath, expectedParent) {
- const normalizedTarget = normalize(targetPath);
- const normalizedParent = normalize(expectedParent);
- const rel = relative(normalizedParent, normalizedTarget);
- if (rel.startsWith('..') || isAbsolute(rel)) {
- console.error(` ❌ Path traversal detected: ${targetPath} escapes ${expectedParent}`);
- process.exit(1);
- }
- return normalizedTarget;
-}
-
-// Plugin directory structure (following superpowers pattern)
-const pluginRootDir = join(projectRoot, '.claude-plugin', 'memesh');
-const pluginMetadataDir = join(pluginRootDir, '.claude-plugin');
-
-console.log('🔧 Preparing plugin directory for Claude Code installation...\n');
-
-// Step 1: Create plugin directory structure
-// Use recursive mkdir which handles existing directories safely (avoids TOCTOU race condition)
-console.log('1️⃣ Creating plugin directory structure...');
-mkdirSync(pluginMetadataDir, { recursive: true });
-console.log(` ✅ Ensured: ${pluginRootDir.replace(projectRoot, '.')}`);
-console.log(` ✅ Ensured: ${pluginMetadataDir.replace(projectRoot, '.')}`);
-
-// Step 2: Copy compiled dist/ to plugin directory
-console.log('\n2️⃣ Copying compiled dist/ to plugin directory...');
-const sourceDist = join(projectRoot, 'dist');
-const targetDist = join(pluginRootDir, 'dist');
-
-if (!existsSync(sourceDist)) {
- console.error(' ❌ Error: dist/ directory not found. Please run "npm run build" first.');
- process.exit(1);
-}
-
-try {
- cpSync(sourceDist, targetDist, { recursive: true });
- console.log(' ✅ Copied dist/ → .claude-plugin/memesh/dist/');
-} catch (error) {
- console.error(' ❌ Error copying dist/:', error.message);
- process.exit(1);
-}
-
-// Step 3: Copy package.json to plugin directory
-console.log('\n3️⃣ Copying package.json to plugin directory...');
-const sourcePackageJson = join(projectRoot, 'package.json');
-const targetPackageJson = join(pluginRootDir, 'package.json');
-
-try {
- copyFileSync(sourcePackageJson, targetPackageJson);
- console.log(' ✅ Copied package.json → .claude-plugin/memesh/');
-} catch (error) {
- console.error(' ❌ Error copying package.json:', error.message);
- process.exit(1);
-}
-
-// Step 4: Copy scripts directory to plugin directory
-console.log('\n4️⃣ Copying scripts directory to plugin directory...');
-const sourceScripts = join(projectRoot, 'scripts');
-const targetScripts = join(pluginRootDir, 'scripts');
-
-try {
- cpSync(sourceScripts, targetScripts, { recursive: true });
- console.log(' ✅ Copied scripts/ → .claude-plugin/memesh/scripts/');
-} catch (error) {
- console.error(' ❌ Error copying scripts/:', error.message);
- process.exit(1);
-}
-
-// Step 5: Copy plugin.json to .claude-plugin/ subdirectory (following superpowers pattern)
-console.log('\n5️⃣ Copying plugin.json to .claude-plugin/ metadata directory...');
-const pluginJsonCandidates = [
- join(projectRoot, 'plugin.json'),
- join(projectRoot, '.claude-plugin', 'plugin.json'),
-];
-const sourcePluginJson = pluginJsonCandidates.find((candidate) => existsSync(candidate));
-const targetPluginJson = join(pluginMetadataDir, 'plugin.json');
-
-if (!sourcePluginJson) {
- console.error(' ❌ Error: plugin.json not found. Please create it at project root.');
- process.exit(1);
-}
-
-try {
- copyFileSync(sourcePluginJson, targetPluginJson);
- console.log(' ✅ Copied plugin.json → .claude-plugin/memesh/.claude-plugin/');
-} catch (error) {
- console.error(' ❌ Error copying plugin.json:', error.message);
- process.exit(1);
-}
-
-// Step 5.5: Copy .mcp.json to plugin root directory
-console.log('\n5.5️⃣ Copying .mcp.json to plugin directory...');
-const sourceMcpJson = join(projectRoot, '.mcp.json');
-const targetMcpJson = join(pluginRootDir, '.mcp.json');
-
-if (!existsSync(sourceMcpJson)) {
- console.error(' ❌ Error: .mcp.json not found. Please create it at project root.');
- process.exit(1);
-}
-
-try {
- copyFileSync(sourceMcpJson, targetMcpJson);
- console.log(' ✅ Copied .mcp.json → .claude-plugin/memesh/.mcp.json');
-} catch (error) {
- console.error(' ❌ Error copying .mcp.json:', error.message);
- process.exit(1);
-}
-
-// Step 5.6: Copy hooks/hooks.json to plugin directory
-console.log('\n5.6️⃣ Copying hooks/hooks.json to plugin directory...');
-const sourceHooksJson = join(projectRoot, 'hooks', 'hooks.json');
-const targetHooksDir = join(pluginRootDir, 'hooks');
-const targetHooksJson = join(targetHooksDir, 'hooks.json');
-
-try {
- mkdirSync(targetHooksDir, { recursive: true });
- if (existsSync(sourceHooksJson)) {
- copyFileSync(sourceHooksJson, targetHooksJson);
- console.log(' ✅ Copied hooks/hooks.json → .claude-plugin/memesh/hooks/');
- } else {
- console.log(' ⚠️ hooks/hooks.json not found, skipping');
- }
-} catch (error) {
- console.error(' ❌ Error copying hooks/hooks.json:', error.message);
-}
-
-// Step 5.7: Copy skills/ directory to plugin directory
-console.log('\n5.7️⃣ Copying skills/ to plugin directory...');
-const sourceSkills = join(projectRoot, 'skills');
-const targetSkills = join(pluginRootDir, 'skills');
-
-try {
- if (existsSync(sourceSkills)) {
- cpSync(sourceSkills, targetSkills, { recursive: true });
- console.log(' ✅ Copied skills/ → .claude-plugin/memesh/skills/');
- } else {
- console.log(' ⚠️ skills/ directory not found, skipping');
- }
-} catch (error) {
- console.error(' ❌ Error copying skills/:', error.message);
-}
-
-// Step 6: Install production dependencies
-console.log('\n6️⃣ Installing production dependencies in plugin directory...');
-console.log(' (This may take a minute...)');
-
-try {
- execSync('npm install --production --loglevel=error', {
- cwd: pluginRootDir,
- stdio: 'inherit'
- });
- console.log(' ✅ Dependencies installed successfully');
-} catch (error) {
- console.error(' ❌ Error installing dependencies:', error.message);
- process.exit(1);
-}
-
-// Step 7: Verify the plugin structure
-console.log('\n7️⃣ Verifying plugin structure...');
-
-const requiredFiles = [
- join(pluginRootDir, 'dist', 'mcp', 'server-bootstrap.js'),
- join(pluginRootDir, 'package.json'),
- join(pluginRootDir, 'node_modules'),
- join(pluginMetadataDir, 'plugin.json'), // In .claude-plugin/ subdirectory
- join(pluginRootDir, '.mcp.json'), // MCP server configuration
- join(pluginRootDir, 'hooks', 'hooks.json'), // Hook declarations
- join(pluginRootDir, 'scripts', 'hooks'), // Hook scripts
-];
-
-let allFilesExist = true;
-for (const file of requiredFiles) {
- if (existsSync(file)) {
- console.log(` ✅ ${file.replace(pluginRootDir + '/', '')}`);
- } else {
- console.error(` ❌ Missing: ${file.replace(pluginRootDir + '/', '')}`);
- allFilesExist = false;
- }
-}
-
-if (!allFilesExist) {
- console.error('\n❌ Plugin preparation incomplete. Please check errors above.');
- process.exit(1);
-}
-
-// Step 8: Register marketplace in known_marketplaces.json
-// (MCP is handled by plugin system via .mcp.json — no manual mcp_settings.json config needed)
-console.log('\n8️⃣ Registering marketplace in Claude Code...');
-
-const pluginsDir = join(homedir(), '.claude', 'plugins');
-const marketplacesDir = join(pluginsDir, 'marketplaces');
-const knownMarketplacesPath = join(pluginsDir, 'known_marketplaces.json');
-const marketplaceSymlink = join(marketplacesDir, 'pcircle-ai');
-const claudePluginRoot = join(projectRoot, '.claude-plugin');
-
-try {
- // Ensure marketplaces directory exists
- mkdirSync(marketplacesDir, { recursive: true });
- console.log(` ✅ Ensured: ${marketplacesDir}`);
-
- // Validate symlink target exists and is a directory
- if (!existsSync(claudePluginRoot)) {
- throw new Error(`Plugin source directory does not exist: ${claudePluginRoot}`);
- }
- const targetStats = statSync(claudePluginRoot);
- if (!targetStats.isDirectory()) {
- throw new Error(`Plugin source must be a directory: ${claudePluginRoot}`);
- }
-
- // Create symlink to .claude-plugin directory (atomic try-create-first approach)
- try {
- symlinkSync(claudePluginRoot, marketplaceSymlink, 'dir');
- console.log(` ✅ Created symlink: pcircle-ai → ${claudePluginRoot}`);
- } catch (err) {
- if (err.code === 'EEXIST') {
- // Symlink exists - remove and retry
- try {
- unlinkSync(marketplaceSymlink);
- symlinkSync(claudePluginRoot, marketplaceSymlink, 'dir');
- console.log(` ✅ Updated existing symlink: pcircle-ai → ${claudePluginRoot}`);
- } catch (retryErr) {
- throw new Error(`Failed to update marketplace symlink: ${retryErr.code || retryErr.message}`);
- }
- } else {
- throw err;
- }
- }
-
- // Update known_marketplaces.json (read directly, no existsSync to avoid TOCTOU race)
- let knownMarketplaces = {};
- try {
- const content = readFileSync(knownMarketplacesPath, 'utf-8').trim();
- if (content) {
- knownMarketplaces = JSON.parse(content);
- }
- } catch (e) {
- if (e.code === 'ENOENT') {
- console.log(' ℹ️ No existing marketplace config, creating new');
- } else if (e instanceof SyntaxError) {
- const backupPath = `${knownMarketplacesPath}.backup-${Date.now()}`;
- try { copyFileSync(knownMarketplacesPath, backupPath); } catch {}
- console.log(` ⚠️ Corrupted marketplace config backed up to: ${backupPath}`);
- } else {
- console.error(` ❌ Unexpected error reading marketplace config: ${e.code || e.message}`);
- throw e;
- }
- }
-
- // Register in known_marketplaces.json (required for Claude Code to discover marketplace)
- // Previous assumption that "symlink alone is sufficient" was incorrect - Claude Code needs both
- knownMarketplaces['pcircle-ai'] = {
- source: {
- source: 'directory',
- path: claudePluginRoot
- },
- installLocation: marketplaceSymlink,
- lastUpdated: new Date().toISOString()
- };
-
- try {
- writeFileSync(knownMarketplacesPath, JSON.stringify(knownMarketplaces, null, 2) + '\n', 'utf-8');
- console.log(` ✅ Registered in known_marketplaces.json: pcircle-ai`);
- } catch (writeError) {
- console.error(` ❌ Failed to write known_marketplaces.json: ${writeError.message}`);
- throw writeError;
- }
-} catch (error) {
- if (error.code === 'EACCES') {
- console.error(` ❌ Permission denied. Try running with elevated privileges.`);
- } else if (error.code === 'ENOENT') {
- console.error(` ❌ Required directory not found. Ensure project is built first.`);
- } else {
- console.error(` ❌ Marketplace registration failed (${error.code || 'unknown'}). See error details above.`);
- }
- console.error(` [Debug] ${error.message}`);
- process.exit(1);
-}
-
-// Step 9: Enable plugin in settings.json
-console.log('\n9️⃣ Enabling plugin in Claude Code settings...');
-
-const settingsPath = join(homedir(), '.claude', 'settings.json');
-
-try {
- let settings = { enabledPlugins: {} };
-
- // Read directly without existsSync to avoid TOCTOU race
- try {
- const content = readFileSync(settingsPath, 'utf-8').trim();
- if (content) {
- settings = JSON.parse(content);
- if (!settings.enabledPlugins) {
- settings.enabledPlugins = {};
- }
- }
- } catch (e) {
- if (e.code === 'ENOENT') {
- console.log(' ℹ️ No existing settings found, creating new');
- } else if (e instanceof SyntaxError) {
- const backupPath = `${settingsPath}.backup-${Date.now()}`;
- try { copyFileSync(settingsPath, backupPath); } catch {}
- console.log(` ⚠️ Corrupted settings backed up to: ${backupPath}`);
- } else {
- console.error(` ❌ Unexpected error reading settings: ${e.code || e.message}`);
- throw e;
- }
- }
-
- // Enable memesh plugin
- settings.enabledPlugins['memesh@pcircle-ai'] = true;
-
- writeFileSync(settingsPath, JSON.stringify(settings, null, 2) + '\n', 'utf-8');
- console.log(` ✅ Enabled plugin in: ${settingsPath}`);
-} catch (error) {
- if (error.code === 'EACCES') {
- console.error(` ❌ Permission denied writing settings. Try running with elevated privileges.`);
- } else if (error.code === 'ENOENT') {
- console.error(` ❌ Settings directory not found at: ${join(homedir(), '.claude')}`);
- } else {
- console.error(` ❌ Plugin enablement failed (${error.code || 'unknown'}). See error details above.`);
- }
- console.error(` [Debug] ${error.message}`);
- process.exit(1);
-}
-
-// Final success message
-console.log('\n' + '═'.repeat(60));
-console.log('✅ Plugin installation complete!');
-console.log('═'.repeat(60));
-
-console.log('\n📦 Plugin structure:');
-console.log(' .claude-plugin/memesh/');
-console.log(' ├── .claude-plugin/');
-console.log(' │ └── plugin.json ← Plugin metadata');
-console.log(' ├── .mcp.json ← MCP server config (auto-managed)');
-console.log(' ├── hooks/');
-console.log(' │ └── hooks.json ← Hook declarations (auto-managed)');
-console.log(' ├── skills/ ← Skills (auto-discovered)');
-console.log(' ├── dist/ ← Build output');
-console.log(' ├── node_modules/ ← Dependencies');
-console.log(' ├── package.json');
-console.log(' └── scripts/');
-
-console.log('\n🎯 Plugin Registration:');
-console.log(' ✅ Marketplace: pcircle-ai');
-console.log(' ✅ Symlink: ~/.claude/plugins/marketplaces/pcircle-ai');
-console.log(' ✅ Enabled: memesh@pcircle-ai');
-
-console.log('\n🔧 Plugin Components (auto-managed by Claude Code):');
-console.log(' ✅ MCP Server: via .mcp.json');
-console.log(' ✅ Hooks: via hooks/hooks.json');
-console.log(' ✅ Skills: via skills/ directory');
-
-console.log('\n🚀 Next Steps:');
-console.log(' 1. Restart Claude Code completely (quit and reopen)');
-console.log(' 2. Verify: Check for memesh tools in available tools list');
-console.log(' 3. Test: Run "buddy-help" command');
-
-console.log('\n💡 Troubleshooting:');
-console.log(' - If tools not showing: Check ~/.claude/plugins/known_marketplaces.json');
-console.log(' - If plugin disabled: Check ~/.claude/settings.json enabledPlugins');
-console.log(' - Hooks/MCP/Skills are auto-managed — no manual config needed');
-
-console.log('\n📝 Note: This is a local dev installation.');
-console.log(' For production, users should install via: npm install -g @pcircle/memesh');
-console.log('');
diff --git a/scripts/quick-install.sh b/scripts/quick-install.sh
deleted file mode 100755
index cade50d5..00000000
--- a/scripts/quick-install.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-
-# Quick Install Script for MeMesh
-# This script installs MeMesh as a Claude Code plugin
-
-set -e
-
-echo "🚀 MeMesh - Quick Install"
-echo "===================================="
-echo ""
-
-# Check if Node.js is installed
-if ! command -v node &> /dev/null; then
- echo "❌ Node.js is not installed. Please install Node.js 20+ first."
- exit 1
-fi
-
-# Check Node version
-NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
-if [ "$NODE_VERSION" -lt 20 ]; then
- echo "❌ Node.js version must be 20 or higher. Current: $(node -v)"
- exit 1
-fi
-
-echo "✅ Node.js $(node -v) detected"
-echo ""
-
-# Get the script directory
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
-
-# Check if we're in the right directory
-if [ ! -f "$PROJECT_DIR/package.json" ]; then
- echo "❌ Cannot find package.json. Please run this script from the MeMesh directory."
- exit 1
-fi
-
-# Install dependencies
-echo "📦 Installing dependencies..."
-cd "$PROJECT_DIR"
-npm install
-
-# Build the project
-echo "🔨 Building MeMesh..."
-npm run build
-
-# Prepare plugin directory structure
-echo "📦 Preparing plugin directory..."
-npm run build:plugin
-
-# Check if plugin was successfully prepared
-if [ ! -f "$PROJECT_DIR/.claude-plugin/memesh/.claude-plugin/plugin.json" ]; then
- echo "❌ Plugin preparation failed. Please check the error messages above."
- exit 1
-fi
-
-if [ ! -f "$PROJECT_DIR/.claude-plugin/memesh/dist/mcp/server-bootstrap.js" ]; then
- echo "❌ MCP server build failed. Please check the error messages above."
- exit 1
-fi
-
-# Configure environment
-echo "🔧 Configuring environment..."
-
-# Create .env if it doesn't exist
-if [ ! -f "$PROJECT_DIR/.env" ]; then
- if [ -f "$PROJECT_DIR/.env.example" ]; then
- cp "$PROJECT_DIR/.env.example" "$PROJECT_DIR/.env"
- echo "✅ Created .env from template"
- else
- touch "$PROJECT_DIR/.env"
- echo "✅ Created .env file"
- fi
-fi
-
-# Note: prepare-plugin.js (called via npm run build:plugin) already configures
-# ~/.claude/mcp_settings.json automatically. The following is just for verification.
-
-MCP_SETTINGS="$HOME/.claude/mcp_settings.json"
-
-if [ -f "$MCP_SETTINGS" ]; then
- if grep -q '"memesh"' "$MCP_SETTINGS" 2>/dev/null; then
- echo ""
- echo "✅ MCP settings configured at: $MCP_SETTINGS"
- echo " MeMesh is ready to use!"
- else
- echo ""
- echo "⚠️ MCP settings file exists but memesh not configured"
- echo " This is unexpected - please check $MCP_SETTINGS"
- fi
-else
- echo ""
- echo "⚠️ MCP settings file not found"
- echo " Expected at: $MCP_SETTINGS"
- echo ""
- echo " This may happen if prepare-plugin.js couldn't write the file."
- echo " You can manually create it with:"
- echo ""
- echo ' cat > ~/.claude/mcp_settings.json << EOF'
- echo ' {'
- echo ' "mcpServers": {'
- echo ' "memesh": {'
- echo ' "command": "node",'
- echo " \"args\": [\"$PROJECT_DIR/.claude-plugin/memesh/dist/mcp/server-bootstrap.js\"],"
- echo ' "env": {'
- echo ' "NODE_ENV": "production"'
- echo ' }'
- echo ' }'
- echo ' }'
- echo ' }'
- echo ' EOF'
-fi
-
-echo ""
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-echo "✅ Installation complete!"
-echo ""
-echo "📁 Plugin structure:"
-echo " .claude-plugin/memesh/"
-echo " ├── .claude-plugin/"
-echo " │ └── plugin.json ← Plugin metadata"
-echo " ├── .mcp.json ← MCP server config"
-echo " ├── dist/"
-echo " │ └── mcp/server-bootstrap.js"
-echo " ├── node_modules/"
-echo " └── scripts/"
-echo ""
-echo "🔧 MCP Configuration:"
-echo " Auto-configured at: ~/.claude/mcp_settings.json"
-echo ""
-echo "🚀 Next steps:"
-echo " 1. Restart Claude Code (completely quit and reopen)"
-echo " 2. Test: Ask \"List available MeMesh tools\""
-echo ""
-echo "🧪 Alternative: Test Plugin Locally:"
-echo " claude --plugin-dir \"$PROJECT_DIR/.claude-plugin/memesh\""
-echo ""
-echo "📚 Documentation:"
-echo " - Setup guide: docs/DEV_SETUP_GUIDE.md"
-echo " - Commands: docs/COMMANDS.md"
-echo " - User guide: docs/USER_GUIDE.md"
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
-echo ""
-echo "🎉 Happy coding with MeMesh!"
diff --git a/scripts/release.sh b/scripts/release.sh
deleted file mode 100755
index dae5e202..00000000
--- a/scripts/release.sh
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/bin/bash
-# Semi-automated release script
-# Handles pre-checks, version bump, and commit
-# Auto-release.yml workflow handles tag creation, GitHub release, and npm publish trigger
-
-set -e
-
-# Check arguments
-if [ -z "$1" ]; then
- echo "Usage: ./scripts/release.sh [patch|minor|major]"
- echo ""
- echo "Examples:"
- echo " ./scripts/release.sh patch # 2.5.0 → 2.5.1"
- echo " ./scripts/release.sh minor # 2.5.0 → 2.6.0"
- echo " ./scripts/release.sh major # 2.5.0 → 3.0.0"
- echo ""
- echo "This script will:"
- echo " 1. Run pre-deployment checks"
- echo " 2. Bump version in all locations"
- echo " 3. Commit and push to GitHub"
- echo " 4. Trigger auto-release.yml workflow (tag + release + npm publish)"
- exit 1
-fi
-
-VERSION_TYPE=$1
-
-# Validate version type
-if [[ ! "$VERSION_TYPE" =~ ^(patch|minor|major)$ ]]; then
- echo "❌ Invalid version type: $VERSION_TYPE"
- echo "Must be: patch, minor, or major"
- exit 1
-fi
-
-echo "🚀 Release Process Started"
-echo "================================"
-echo "Version bump type: $VERSION_TYPE"
-echo ""
-
-# 1. Pre-deployment checks
-echo "📋 Step 1/4: Running pre-deployment checks..."
-if ./scripts/pre-deployment-check.sh; then
- echo "✅ Pre-deployment checks passed"
-else
- echo "❌ Pre-deployment checks failed"
- exit 1
-fi
-echo ""
-
-# 2. Bump version (all 4 locations)
-echo "📦 Step 2/4: Bumping version ($VERSION_TYPE)..."
-npm version $VERSION_TYPE --no-git-tag-version
-
-NEW_VERSION=$(node -p "require('./package.json').version")
-echo " package.json → v$NEW_VERSION"
-
-# Sync plugin.json version
-node -e "
-const fs = require('fs');
-const p = JSON.parse(fs.readFileSync('plugin.json', 'utf8'));
-p.version = '$NEW_VERSION';
-fs.writeFileSync('plugin.json', JSON.stringify(p, null, 2) + '\n');
-"
-echo " plugin.json → v$NEW_VERSION"
-
-# Run build (includes prepare:plugin which syncs .claude-plugin/ versions + dist)
-npm run build > /dev/null 2>&1
-echo " .claude-plugin/memesh/ → synced via prepare:plugin"
-
-echo "✅ All 4 version locations synced to v$NEW_VERSION"
-echo ""
-
-# 3. Commit version changes (no tag - auto-release.yml will create it)
-echo "💾 Step 3/4: Committing version changes..."
-git add package.json plugin.json .claude-plugin/
-git commit -m "chore(release): bump version to v$NEW_VERSION"
-echo "✅ Created commit for v$NEW_VERSION"
-echo ""
-
-# 4. Push to GitHub (triggers auto-release.yml)
-echo "⬆️ Step 4/4: Pushing to GitHub..."
-git push origin main
-echo "✅ Pushed to GitHub"
-echo ""
-
-# Wait for Auto Release workflow
-echo "⏳ Waiting for Auto Release workflow to start..."
-echo "The workflow will automatically:"
-echo " • Create git tag v$NEW_VERSION"
-echo " • Create GitHub release with changelog"
-echo " • Trigger npm publish workflow"
-echo ""
-sleep 5
-
-# Monitor Auto Release workflow
-echo "📊 Monitoring Auto Release workflow..."
-WORKFLOW_ID=$(gh run list --workflow="Auto Release" --limit 1 --json databaseId,status --jq '.[0] | select(.status == "in_progress" or .status == "queued") | .databaseId')
-
-if [ -n "$WORKFLOW_ID" ]; then
- echo "Auto Release workflow started (ID: $WORKFLOW_ID)"
- echo "Watching workflow execution..."
- gh run watch $WORKFLOW_ID --exit-status || {
- echo "❌ Auto Release workflow failed"
- echo "Check logs: gh run view $WORKFLOW_ID"
- exit 1
- }
- echo "✅ Auto Release workflow completed"
-else
- echo "⚠️ Workflow not detected yet. Check manually:"
- echo " gh run list --workflow=\"Auto Release\""
- echo ""
- echo "Expected workflow actions:"
- echo " 1. Detect version change in package.json"
- echo " 2. Create tag v$NEW_VERSION"
- echo " 3. Create GitHub release"
- echo " 4. Trigger npm publish"
- exit 0
-fi
-echo ""
-
-# Monitor npm publish workflow
-echo "📦 Monitoring npm publish workflow..."
-sleep 5
-NPM_WORKFLOW_ID=$(gh run list --workflow="Publish to npm" --limit 1 --json databaseId,status --jq '.[0] | select(.status == "in_progress" or .status == "queued") | .databaseId')
-
-if [ -n "$NPM_WORKFLOW_ID" ]; then
- echo "npm publish workflow started (ID: $NPM_WORKFLOW_ID)"
- gh run watch $NPM_WORKFLOW_ID --exit-status || {
- echo "❌ npm publish workflow failed"
- echo "Check logs: gh run view $NPM_WORKFLOW_ID"
- exit 1
- }
- echo "✅ npm publish workflow completed"
-else
- echo "⚠️ npm workflow not detected. It may start shortly."
- echo "Check manually: gh run list --workflow=\"Publish to npm\""
-fi
-echo ""
-
-# Verify deployment
-echo "✅ Verifying deployment..."
-echo "Waiting 10 seconds for npm registry to update..."
-sleep 10
-
-PUBLISHED_VERSION=$(npm view @pcircle/memesh version 2>/dev/null || echo "unknown")
-
-if [ "$PUBLISHED_VERSION" = "$NEW_VERSION" ]; then
- echo "✅ Successfully published v$NEW_VERSION to npm"
-else
- echo "⚠️ npm version mismatch:"
- echo " Expected: v$NEW_VERSION"
- echo " Got: $PUBLISHED_VERSION"
- echo ""
- echo "This might be a registry delay. Wait a few minutes and check:"
- echo " npm view @pcircle/memesh version"
-fi
-echo ""
-
-# Success summary
-echo "================================"
-echo "🎊 Release Complete!"
-echo ""
-echo "Version: v$NEW_VERSION"
-echo "npm: https://www.npmjs.com/package/@pcircle/memesh/v/$NEW_VERSION"
-echo "GitHub: https://github.com/PCIRCLE-AI/claude-code-buddy/releases/tag/v$NEW_VERSION"
-echo ""
-echo "Next steps:"
-echo "1. Monitor GitHub Issues for any reports"
-echo "2. Test installation: npm install -g @pcircle/memesh@latest"
-echo "3. Verify MCP tools: memesh --version"
diff --git a/scripts/setup.sh b/scripts/setup.sh
deleted file mode 100755
index a0e98dc6..00000000
--- a/scripts/setup.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-set -e
-
-# Trap errors and provide helpful context
-trap 'echo ""; echo "❌ Setup failed at line $LINENO"; echo " You can run the script again or see README.md for manual setup"; exit 1' ERR
-
-echo "🚀 MeMesh - Automated Setup"
-echo "=================================="
-echo ""
-
-# Check Node.js version
-NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
-if [ "$NODE_VERSION" -lt 20 ]; then
- echo "❌ Error: Node.js 20+ required (current: $(node -v))"
- exit 1
-fi
-echo "✅ Node.js version: $(node -v)"
-
-# Check npm is installed
-if ! command -v npm &> /dev/null; then
- echo "❌ Error: npm is not installed"
- echo " Please install Node.js and npm from https://nodejs.org/"
- exit 1
-fi
-echo "✅ npm is installed"
-
-# Install dependencies
-echo ""
-echo "📦 Installing dependencies..."
-npm install
-echo "✅ Dependencies installed"
-
-# Setup environment
-echo ""
-if [ ! -f .env ]; then
- echo "📝 Creating .env file..."
- cp .env.example .env
- echo "✅ .env created from template"
- echo ""
- # Check if MCP_SERVER_MODE is true
- if grep -q "MCP_SERVER_MODE=true" .env 2>/dev/null; then
- echo "ℹ️ MCP Server mode enabled - Claude Code will manage API access"
- else
- echo "⚠️ IMPORTANT: Edit .env and add your ANTHROPIC_API_KEY"
- echo " Get your key from: https://console.anthropic.com/"
- fi
-else
- echo "✅ .env file already exists"
-fi
-
-# Run tests
-echo ""
-echo "🧪 Running tests..."
-if npm test; then
- echo "✅ All tests passed"
-else
- echo "⚠️ Some tests failed, but you can continue setup"
- echo " Fix test issues later by running: npm test"
-fi
-
-# Build project
-echo ""
-echo "🔨 Building project..."
-npm run build
-echo "✅ Build complete"
-
-# Optional MCP Server Setup
-echo ""
-echo "📡 MCP Server Setup (Optional)"
-read -p "Would you like to configure MCP server integration? (y/n) " -n 1 -r
-echo
-if [[ $REPLY =~ ^[Yy]$ ]]; then
- echo "🔧 Starting MCP server configuration..."
- npm run mcp || {
- echo "⚠️ MCP server setup failed. You can configure it later with: npm run mcp"
- }
-else
- echo "⏭ Skipping MCP server setup. You can configure it later with: npm run mcp"
-fi
-
-echo ""
-echo "✅ Setup complete!"
-echo ""
-echo "Next steps:"
-if grep -q "MCP_SERVER_MODE=false" .env 2>/dev/null; then
- echo "1. Edit .env and add your ANTHROPIC_API_KEY"
- echo " Get your key from: https://console.anthropic.com/"
-else
- echo "1. ✅ MCP Server mode is configured - Claude Code will handle API access"
-fi
-echo "2. Configure Claude Code to use this MCP server (if not already done)"
-echo ""
-echo "Documentation: README.md"
-echo "Setup time: < 15 minutes"
diff --git a/scripts/skills/comprehensive-code-review/SKILL.md b/scripts/skills/comprehensive-code-review/SKILL.md
deleted file mode 100644
index 5e5105e3..00000000
--- a/scripts/skills/comprehensive-code-review/SKILL.md
+++ /dev/null
@@ -1,276 +0,0 @@
----
-name: comprehensive-code-review
-description: |
- Complete code review framework with mandatory tool-verified checks and anti-hallucination enforcement.
- Use when: performing code reviews, reviewing PRs, checking code quality before merge.
- Keywords: code review, review code, 代碼審查, PR review, pull request, 檢查代碼.
- Auto-triggers on: "code review", "review the code", "run code review".
----
-
-# Comprehensive Code Review Framework v4.0
-
-**Philosophy**: 不信任自己。用工具證明一切。沒有 evidence 的 check 等於沒做。
-
-**v4.0 vs v3.0**: v3.0 說了「應該檢查什麼」但沒強制怎麼做。v4.0 的每一步都是**必須用工具執行並產出 evidence** 的指令,不是建議。
-
----
-
-## 🔴 Step 0: Ripple Map(強制首步,不可跳過)
-
-**PURPOSE**: 在做任何 review 之前,先找出所有「改了 A 就必須改 B」的連鎖反應。
-
-**這是 v4.0 最重要的改變。** 之前的 review 只看「已改的檔案寫得對不對」,從不問「還有哪些檔案該改但沒改」。
-
-### 執行方式(必須用工具)
-
-```
-FOR EACH modified file:
- 1. 找出這個文件中被新增/修改的 export(type, function, constant)
- 2. Grep 整個 codebase 找出所有 import 這些 export 的文件
- 3. 這些文件是否也在本次修改範圍內?
- - 是 → 後續 review 時檢查一致性
- - 否 → 🔴 SUSPECT: 可能遺漏。立即 Read 該文件確認是否需要更新
-```
-
-### 必須執行的工具命令
-
-```bash
-# 1. 列出所有修改的文件
-git diff --name-only HEAD # 或 review 指定的文件列表
-
-# 2. 對每個被修改的 interface/type/function,搜尋所有引用
-Grep: pattern="DeliverParams" # 每個被修改的型別名
-Grep: pattern="deliverOrder" # 每個被修改的函數名
-Grep: pattern="delivery_result" # 每個被新增的 DB column
-
-# 3. 比對:引用這些東西的文件 vs 本次修改的文件
-# 差集 = 可能遺漏的文件
-```
-
-### Ripple Map Output 格式(必須產出)
-
-```
-## Ripple Map
-
-| Changed Symbol | Files That Reference It | In Scope? | Verified? |
-|----------------|------------------------|-----------|-----------|
-| `DeliverParams` | sdk/types.ts, sdk/client.ts, deliver/route.ts | ✅/❌ | Read 確認 |
-| `deliverOrder()` | client.ts, sdk/client.ts, example/index.ts | ✅/❌ | Read 確認 |
-| `delivery_result` | supabase.ts, api-utils.ts, deliver/route.ts | ✅/❌ | Read 確認 |
-
-🔴 Unsynchronized: [列出所有引用但未修改的文件]
-```
-
-**如果 Ripple Map 發現 unsynchronized 文件,直接列為 CRITICAL issue,不需要進入後續維度。**
-
----
-
-## Step 1: Scope Analysis
-
-確認審查範圍:
-- 哪些文件被修改?(用 git diff 或文件列表,不可憑記憶)
-- 變更的目的是什麼?
-- **有幾個 package/module 邊界被跨越?**
-- **Ripple Map 發現多少個 unsynchronized 文件?**
-
----
-
-## Step 2: Mandatory Verification Dimensions
-
-**12 個維度。Dim 11 和 12 是最高優先級,必須用 Explore subagent 或多個 Read/Grep 並行執行。**
-
-**每個 check 必須產出 evidence。格式:**
-```
-[DIM-11a] ✅ PASS — import { validateOutputSchema } from '@/lib/api-utils'
- Evidence: Read api-utils.ts → line 118: export function validateOutputSchema(...)
-
-[DIM-12d] 🔴 FAIL — SDK DeliverParams missing deliveryResult
- Evidence: Read packages/sdk/src/types.ts:177-180 → only has deliveryHash, deliveryMetadata
-```
-
-**沒有 evidence 的 ✅ 等於 ❌。**
-
----
-
-### 🔴 Dim 11: Reality Check(必須用 Explore subagent 執行)
-
-**IMPORTANT: 這個維度必須 dispatch 至少一個 Explore subagent 來獨立驗證。不可自己看一眼就說 PASS。**
-
-**Subagent prompt 模板**:
-```
-In [project_path], verify the following for files [list]:
-1. Every import resolves to a real export (read the source file to confirm)
-2. Every method call references a method that exists in the target class
-3. Every DB column referenced exists in migrations
-4. Search for TODO/FIXME/STUB in all modified files
-5. For every new function: grep for callers — is it actually called from somewhere?
-Report exact findings with file:line evidence for each.
-```
-
-**11a-11f 檢查項目**(subagent 必須全部執行):
-
-| Check | Tool | What to verify |
-|-------|------|----------------|
-| 11a. Import/Export | Read 被 import 的文件 | export 真的存在 |
-| 11b. Method calls | Read 被呼叫的 class | method 真的定義了 |
-| 11c. DB schema | Read migration files | column 在 migration 裡 |
-| 11d. Example code | Read constructor + method 簽名 | 參數和回傳值匹配 |
-| 11e. Stubs | Grep TODO/FIXME/STUB/throw.*not.impl | 無假完成 |
-| 11f. Dead wire | Grep function name across codebase | 有 caller 存在 |
-
----
-
-### 🔴 Dim 12: Cross-boundary Sync(必須用 Explore subagent 執行)
-
-**IMPORTANT: Monorepo 或多 package 專案時,此維度必須 dispatch 獨立 subagent。**
-
-**Subagent prompt 模板**:
-```
-In [project_path], check cross-boundary type synchronization:
-1. Compare [platform types file] with [SDK types file] — list every interface
- that exists in both. For each, compare field-by-field.
-2. Compare [platform client file] with [SDK client file] — list every method
- that exists in both. For each, compare signature.
-3. For every API route that accepts a body: read the route handler AND the SDK
- method that calls it. Do the field names match?
-4. Trace: migration column → DbType → mapResponse → API type → SDK type.
- Report any broken links.
-Report ALL mismatches with exact file:line evidence.
-```
-
-**12a-12d 檢查項目**(subagent 必須全部執行):
-
-| Check | What | How |
-|-------|------|-----|
-| 12a. Type parity | 同名 interface 欄位一致 | Read 兩個 type 文件,逐欄比對 |
-| 12b. Client parity | 同名 method 簽名一致 | Read 兩個 client 文件,逐方法比對 |
-| 12c. Route↔SDK match | Route body 欄位 = SDK params 欄位 | Read route + SDK method |
-| 12d. Full chain | migration→DbType→mapper→ApiType→SdkType | 追蹤新增欄位通過每一層 |
-
----
-
-### Dim 1-10: Standard Quality Dimensions
-
-#### 1. 🔒 Security
-- Injection attacks (SQL, Command, Path traversal)
-- Sensitive data exposure
-- Auth/AuthZ gaps, hardcoded secrets
-
-#### 2. 🔄 Concurrency
-- Race conditions (TOCTOU), deadlocks
-- Atomic operation needs
-
-#### 3. 💾 Resource Management
-- Resource leaks, unbounded buffers
-- Timer/listener cleanup
-
-#### 4. ❌ Error Handling
-- Uncaught exceptions, empty catch
-- Unhandled Promise rejections
-
-#### 5. 📊 Edge Cases
-- Empty/null/NaN/zero/MAX values
-- First run / cold start
-
-#### 6. ✅ Input Validation
-- Type, range, format, length validation
-
-#### 7. ⚡ Performance
-- O(n²), blocking I/O, N+1 queries
-
-#### 8. 📝 Code Quality
-- Dead code, magic numbers, naming
-
-#### 9. 📖 Documentation
-- API docs, outdated comments
-
-#### 10. 🧪 Test Coverage
-- Happy/error/edge paths tested
-- New functions have corresponding tests
-
----
-
-## Step 3: Issue Classification
-
-| Level | Symbol | Criteria | Action |
-|-------|--------|----------|--------|
-| CRITICAL | 🔴 | Security, crash, **hallucination, broken integration, missing sync** | Fix immediately |
-| MAJOR | 🟠 | Bugs, leaks, performance, type mismatch | Fix before merge |
-| MINOR | 🟡 | Quality, maintenance | Document |
-
-**鐵則**: Dim 11/12 的 issue 最低 MAJOR,大部分 CRITICAL。
-
----
-
-## Step 4: Fix Dispatch
-
-```
-Dispatch subagents in parallel (max 6):
-- Group by file or logical module
-- Each subagent: fix root cause → run related tests → validate no regression
-- After all complete: run full test suite
-- After full suite: re-run Ripple Map to verify no new gaps
-```
-
----
-
-## Step 5: Final Verification Gate
-
-**在宣稱 review 完成前,必須全部通過:**
-
-```
-□ Ripple Map 無 unsynchronized 文件
-□ Dim 11 所有 check 有 evidence 且 PASS
-□ Dim 12 所有 check 有 evidence 且 PASS
-□ 所有 CRITICAL issues 已修復
-□ 所有 MAJOR issues 已修復
-□ MINOR issues 已記錄
-□ tsc --noEmit pass(主專案)
-□ tsc --noEmit pass(每個子 package 獨立跑)
-□ 全部測試 pass
-□ Build pass
-```
-
-**如果任何 check 缺少 evidence,整個 review 標記為 INCOMPLETE。**
-
----
-
-## AI 幻想模式速查表
-
-| # | Pattern | 怎麼偵測 | 工具 |
-|---|---------|---------|------|
-| 1 | **Ghost Method** — 呼叫不存在的方法 | Read class file, search method | Read |
-| 2 | **Phantom Import** — import 不存在的 export | Read source file | Read/Grep |
-| 3 | **Schema Drift** — 讀寫不存在的 DB column | Read all migrations | Read |
-| 4 | **Type Island** — 只改一邊的 type | Ripple Map 自動抓 | Grep |
-| 5 | **Dead Wire** — 定義了但沒被呼叫 | Grep function name | Grep |
-| 6 | **Stub Disguise** — TODO/return {} 偽裝完成 | Grep TODO/FIXME/STUB | Grep |
-| 7 | **Constructor Lie** — 參數不匹配 | Read constructor | Read |
-| 8 | **Mock Leak** — test mock 了但 prod 沒實作 | Check real implementation | Read |
-| 9 | **One-side Fix** — 改了 A 忘改 B | Ripple Map 自動抓 | Grep |
-| 10 | **Verify Theater** — tsc pass 但只跑主 package | 獨立跑每個子 package tsc | Bash |
-
----
-
-## 何時執行
-
-| 觸發條件 | 執行範圍 |
-|---------|---------|
-| 新功能 > 100 lines | Full review (all 12 dims) |
-| API/DB schema 變更 | Full review + 強制 Dim 11/12 |
-| 跨 package 變更 | Full review + 強制 Dim 12 |
-| Bug fix < 50 lines | Quick review (Dim 11 + 改動相關維度) |
-| 任何 monorepo 變更 | 至少 Ripple Map + Dim 12 |
-
----
-
-**Version**: 4.0
-**Last Updated**: 2026-02-22
-**Changelog**:
-- v4.0: 新增 Step 0 Ripple Map — 強制發現遺漏的連鎖修改
-- v4.0: Dim 11/12 必須 dispatch Explore subagent 獨立驗證
-- v4.0: 每個 check 必須產出 evidence,無 evidence = 未完成
-- v4.0: 新增 Verify Theater pattern(tsc pass 但只跑主 package)
-- v4.0: 新增 Final Verification Gate — 子 package 必須獨立跑 tsc
-- v3.0: Added Dim 11/12, hallucination pattern table
-- v2.0: Initial 10-dimension framework
diff --git a/scripts/test-installation.sh b/scripts/test-installation.sh
deleted file mode 100755
index 4761624b..00000000
--- a/scripts/test-installation.sh
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/bin/bash
-#
-# Complete Installation Test Suite
-#
-# Tests the ACTUAL installation flow before publishing to npm.
-# This catches issues like:
-# - Process exiting immediately
-# - Stdio pollution
-# - Missing dependencies
-# - Broken bin scripts
-#
-# Usage: ./scripts/test-installation.sh
-
-set -e
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo -e "${BLUE} Complete Installation Test Suite${NC}"
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo ""
-
-PASSED=0
-FAILED=0
-WARNINGS=0
-
-test_step() {
- local step_name="$1"
- echo -e "${YELLOW}▶ $step_name${NC}"
-}
-
-test_pass() {
- echo -e "${GREEN} ✓ $1${NC}"
- PASSED=$((PASSED + 1))
-}
-
-test_fail() {
- echo -e "${RED} ✗ $1${NC}"
- FAILED=$((FAILED + 1))
-}
-
-test_warn() {
- echo -e "${YELLOW} ⚠ $1${NC}"
- WARNINGS=$((WARNINGS + 1))
-}
-
-# ============================================================================
-# Step 1: Build Package
-# ============================================================================
-test_step "Step 1: Building package"
-
-if npm run build > /dev/null 2>&1; then
- test_pass "TypeScript compilation successful"
-else
- test_fail "TypeScript compilation failed"
- exit 1
-fi
-
-# Check dist/ exists and has files
-if [ -d "dist" ] && [ "$(ls -A dist)" ]; then
- test_pass "dist/ directory populated"
-else
- test_fail "dist/ directory empty or missing"
- exit 1
-fi
-
-echo ""
-
-# ============================================================================
-# Step 2: Create Test NPM Package
-# ============================================================================
-test_step "Step 2: Creating test npm package"
-
-# Pack the package (like npm publish but local)
-if npm pack > /dev/null 2>&1; then
- TARBALL=$(ls -t *.tgz | head -1)
- test_pass "Package tarball created: $TARBALL"
-else
- test_fail "npm pack failed"
- exit 1
-fi
-
-echo ""
-
-# ============================================================================
-# Step 3: Test Local Installation
-# ============================================================================
-test_step "Step 3: Testing local installation"
-
-# Create temp directory for testing
-TEST_DIR=$(mktemp -d)
-cd "$TEST_DIR"
-
-# Install from local tarball
-if npm install -g "$OLDPWD/$TARBALL" > /dev/null 2>&1; then
- test_pass "Global install from tarball successful"
-else
- test_fail "Global install failed"
- cd "$OLDPWD"
- rm -rf "$TEST_DIR"
- exit 1
-fi
-
-cd "$OLDPWD"
-
-echo ""
-
-# ============================================================================
-# Step 4: Test MCP Server Startup
-# ============================================================================
-test_step "Step 4: Testing MCP server startup"
-
-# Test 1: Server starts without crashing
-STARTUP_TEST=$(mktemp)
-(
- DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js > "$STARTUP_TEST" 2>&1 &
- SERVER_PID=$!
- sleep 3
- if kill -0 $SERVER_PID 2>/dev/null; then
- # Process still running
- kill $SERVER_PID 2>/dev/null || true
- exit 0
- else
- # Process already dead
- exit 1
- fi
-)
-
-if [ $? -eq 0 ]; then
- test_pass "Server process stays alive (3 second test)"
-else
- test_fail "Server exited immediately"
- echo "Output:"
- cat "$STARTUP_TEST"
-fi
-
-rm -f "$STARTUP_TEST"
-
-# Test 2: No stdout pollution
-POLLUTION_TEST=$(mktemp)
-(
- DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js > "$POLLUTION_TEST" 2>&1 &
- SERVER_PID=$!
- sleep 1
- kill $SERVER_PID 2>/dev/null || true
-)
-
-if [ -s "$POLLUTION_TEST" ]; then
- # File has content = pollution detected
- test_fail "Stdout/stderr pollution detected:"
- cat "$POLLUTION_TEST"
-else
- test_pass "No stdout/stderr pollution"
-fi
-
-rm -f "$POLLUTION_TEST"
-
-echo ""
-
-# ============================================================================
-# Step 5: Test JSON-RPC Communication
-# ============================================================================
-test_step "Step 5: Testing JSON-RPC communication"
-
-JSONRPC_TEST=$(mktemp)
-STDIN_FILE=$(mktemp)
-WATCHDOG_PID=""
-RPC_PID=""
-INIT_REQUEST='{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"test","version":"1.0.0"}}}'
-
-# Setup cleanup handler to ensure temp files are removed even on script error/exit
-cleanup_step5() {
- # Kill processes if still running
- [ -n "$RPC_PID" ] && kill "$RPC_PID" 2>/dev/null || true
- [ -n "$WATCHDOG_PID" ] && kill "$WATCHDOG_PID" 2>/dev/null || true
- # Remove temp files
- rm -f "$STDIN_FILE" "$JSONRPC_TEST" 2>/dev/null || true
-}
-trap cleanup_step5 EXIT ERR
-
-# Write request to stdin file
-echo "$INIT_REQUEST" > "$STDIN_FILE"
-
-# Start server with stdin from file
-# MEMESH_DISABLE_DAEMON=1: Force standalone mode (avoid daemon bootstrap complexity)
-# DISABLE_MCP_WATCHDOG=1: Disable manual startup detection
-MEMESH_DISABLE_DAEMON=1 DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js < "$STDIN_FILE" > "$JSONRPC_TEST" 2>&1 &
-RPC_PID=$!
-
-# Start timeout watchdog (kills server after 5 seconds if it hangs)
-(sleep 5; kill "$RPC_PID" 2>/dev/null || true) &
-WATCHDOG_PID=$!
-
-# Wait for server to process request
-sleep 2
-
-# Kill server and watchdog
-kill $RPC_PID 2>/dev/null || true
-wait $RPC_PID 2>/dev/null || true
-kill $WATCHDOG_PID 2>/dev/null || true
-wait $WATCHDOG_PID 2>/dev/null || true
-
-# Reset trap (cleanup will still happen at script exit, but we've already cleaned up processes)
-trap - EXIT ERR
-
-# Check response
-# ✅ IMPROVED: Validate actual JSON structure, not just grep for "jsonrpc"
-if command -v jq >/dev/null 2>&1; then
- # Use jq for strict JSON validation
- if jq -e '.jsonrpc == "2.0" and .result != null' "$JSONRPC_TEST" > /dev/null 2>&1; then
- test_pass "Valid JSON-RPC response received (validated with jq)"
- else
- test_fail "Invalid JSON-RPC structure"
- echo "Expected: {\"jsonrpc\":\"2.0\",\"result\":{...}}"
- echo "Got:"
- cat "$JSONRPC_TEST"
- fi
-else
- # Fallback to basic check if jq not available
- if grep -q "jsonrpc" "$JSONRPC_TEST"; then
- test_pass "Valid JSON-RPC response received"
- else
- test_fail "No valid JSON-RPC response"
- echo "Output:"
- cat "$JSONRPC_TEST"
- fi
-fi
-
-# Check for pollution
-if grep -q "dotenv" "$JSONRPC_TEST"; then
- test_fail "dotenv pollution in JSON-RPC output"
-elif head -1 "$JSONRPC_TEST" | grep -q "^{"; then
- test_pass "Clean JSON-RPC output (no pollution)"
-else
- test_warn "JSON-RPC output may have pollution:"
- head -3 "$JSONRPC_TEST"
-fi
-
-rm -f "$JSONRPC_TEST"
-
-echo ""
-
-# ============================================================================
-# Step 6: Test Cursor Deep Link Config
-# ============================================================================
-test_step "Step 6: Verifying Cursor deep link configuration"
-
-# Read package.json
-PACKAGE_NAME=$(node -p "require('./package.json').name")
-BIN_COMMAND=$(node -p "Object.keys(require('./package.json').bin)[0]")
-
-# Expected config
-EXPECTED_CONFIG='{"command":"npx","args":["-y","'$PACKAGE_NAME'"]}'
-EXPECTED_B64=$(echo -n "$EXPECTED_CONFIG" | base64)
-
-test_pass "Package name: $PACKAGE_NAME"
-test_pass "Bin command: $BIN_COMMAND"
-
-# Show expected Cursor link
-echo ""
-echo " Expected Cursor link:"
-echo " cursor://anysphere.cursor-deeplink/mcp/install?name=$PACKAGE_NAME&config=$EXPECTED_B64"
-echo ""
-
-# ============================================================================
-# Cleanup
-# ============================================================================
-test_step "Cleanup"
-
-# Remove tarball
-rm -f "$TARBALL"
-test_pass "Removed test tarball"
-
-# Uninstall global package
-npm uninstall -g "$PACKAGE_NAME" > /dev/null 2>&1 || true
-test_pass "Uninstalled test package"
-
-echo ""
-
-# ============================================================================
-# Summary
-# ============================================================================
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo -e "${BLUE} Test Summary${NC}"
-echo -e "${BLUE}═══════════════════════════════════════════════════${NC}"
-echo ""
-echo -e " Passed: ${GREEN}$PASSED${NC}"
-echo -e " Failed: ${RED}$FAILED${NC}"
-echo -e " Warnings: ${YELLOW}$WARNINGS${NC}"
-echo ""
-
-if [ $FAILED -eq 0 ]; then
- echo -e "${GREEN}✅ ALL TESTS PASSED - SAFE TO PUBLISH${NC}"
- echo ""
- echo "Next steps:"
- echo " 1. Update version: npm version patch|minor|major"
- echo " 2. Publish: npm publish --access public"
- echo " 3. Test from npm: npx -y $PACKAGE_NAME"
- echo ""
- exit 0
-else
- echo -e "${RED}❌ $FAILED TEST(S) FAILED - DO NOT PUBLISH${NC}"
- echo ""
- echo "Fix the issues above before publishing."
- echo ""
- exit 1
-fi
diff --git a/scripts/test-mcp-server-standalone.sh b/scripts/test-mcp-server-standalone.sh
deleted file mode 100755
index b7275f9a..00000000
--- a/scripts/test-mcp-server-standalone.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-# 測試 MCP Server 獨立運作
-# 不需要 Claude Code,直接測試 MCP server 的功能
-
-set -e
-
-echo "🧪 MCP Server 獨立功能測試"
-echo "================================"
-echo ""
-echo "目的:驗證 MCP server 本身能正常運作"
-echo ""
-
-# 檢查必要檔案
-echo "1. 檢查 MCP server 檔案..."
-test -f .claude-plugin/memesh/dist/mcp/server-bootstrap.js || {
- echo "❌ MCP server 檔案不存在,先執行 npm run build:plugin"
- exit 1
-}
-echo "✅ MCP server 檔案存在"
-echo ""
-
-# 測試版本命令
-echo "2. 測試版本命令..."
-VERSION=$(node ./.claude-plugin/memesh/dist/mcp/server-bootstrap.js --version 2>&1)
-if [ $? -eq 0 ]; then
- echo "✅ 版本: $VERSION"
-else
- echo "⚠️ 版本命令需要 stdio(正常)"
-fi
-echo ""
-
-# 測試 MCP server 能否啟動(使用 MCP 協議)
-echo "3. 測試 MCP 協議初始化..."
-cat > /tmp/mcp-test-init.json << 'EOF'
-{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"test-client","version":"1.0.0"}}}
-EOF
-
-# 發送初始化請求(使用 Perl 實現跨平台 timeout)
-perl -e 'alarm 5; exec @ARGV' node ./.claude-plugin/memesh/dist/mcp/server-bootstrap.js < /tmp/mcp-test-init.json > /tmp/mcp-response.json 2>&1 || {
- echo "⚠️ MCP server 回應(可能需要完整握手)"
-}
-
-if [ -f /tmp/mcp-response.json ] && [ -s /tmp/mcp-response.json ]; then
- echo "✅ MCP server 有回應"
- head -5 /tmp/mcp-response.json
-else
- echo "⚠️ 無法獲取完整回應(可能需要完整的 MCP 握手流程)"
-fi
-echo ""
-
-# 清理
-rm -f /tmp/mcp-test-init.json /tmp/mcp-response.json
-
-# 測試環境變數
-echo "4. 測試環境變數處理..."
-NODE_ENV=production \
-DISABLE_MCP_WATCHDOG=1 \
-node ./.claude-plugin/memesh/dist/mcp/server-bootstrap.js --version 2>&1 | head -1 && echo "✅ 環境變數正常處理" || echo "⚠️ 環境變數檢查"
-echo ""
-
-# 測試依賴完整性
-echo "5. 測試依賴完整性..."
-cd .claude-plugin/memesh
-if npm ls --production > /dev/null 2>&1; then
- echo "✅ 所有 production 依賴完整"
-else
- echo "⚠️ 依賴檢查警告(可能有 peer dependencies)"
- npm ls --production 2>&1 | grep -E "WARN|ERR" | head -5
-fi
-cd ../..
-echo ""
-
-echo "================================"
-echo "📊 測試總結"
-echo "================================"
-echo ""
-echo "✅ MCP server 檔案完整"
-echo "✅ 可執行(需要 stdio 輸入)"
-echo "✅ 環境變數處理正確"
-echo "✅ 依賴完整"
-echo ""
-echo "⚠️ 限制:"
-echo " - 無法測試完整的 MCP 協議握手"
-echo " - 無法測試與 Claude Code 的整合"
-echo " - 需要實際的 Claude Code 環境才能完整驗證"
-echo ""
-echo "💡 下一步:"
-echo " 1. 在本地 Claude Code 測試:claude mcp list | grep memesh"
-echo " 2. 測試實際功能:執行 buddy-help 命令"
-echo " 3. 驗證 MCP tools 可用"
-echo ""
diff --git a/scripts/verify-ccb-installation.sh b/scripts/verify-ccb-installation.sh
deleted file mode 100755
index 43c01dc7..00000000
--- a/scripts/verify-ccb-installation.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-# MeMesh Installation Verification Script
-# Verifies that MeMesh MCP Server is properly installed and working
-
-set -e
-
-echo "======================================"
-echo "MeMesh Installation Verification"
-echo "======================================"
-echo ""
-
-# Color codes
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-# Step 1: Check if dist/mcp/server-bootstrap.js exists
-echo "Step 1: Checking compiled MCP server..."
-if [ -f "dist/mcp/server-bootstrap.js" ]; then
- echo -e "${GREEN}✓${NC} dist/mcp/server-bootstrap.js exists"
-else
- echo -e "${RED}✗${NC} dist/mcp/server-bootstrap.js not found"
- echo "Please run: npm run build"
- exit 1
-fi
-
-# Step 2: Check if MeMesh is in MCP server list
-echo ""
-echo "Step 2: Checking MCP server registration..."
-if claude mcp list | grep -E -q "memesh|memesh|claude-code-buddy"; then
- echo -e "${GREEN}✓${NC} MeMesh is registered in MCP server list"
-else
- echo -e "${RED}✗${NC} MeMesh is not registered"
- echo "Please run: claude mcp add memesh --scope user -e NODE_ENV=production -e MEMESH_DATA_DIR=\$HOME/.memesh -e LOG_LEVEL=info -- node $(pwd)/dist/mcp/server-bootstrap.js"
- exit 1
-fi
-
-# Step 3: Check connection status
-echo ""
-echo "Step 3: Checking MCP server connection..."
-if claude mcp list | grep -E "memesh|memesh|claude-code-buddy" | grep -q "✓ Connected"; then
- echo -e "${GREEN}✓${NC} MeMesh MCP server is connected"
-else
- echo -e "${RED}✗${NC} MeMesh MCP server failed to connect"
- echo "Please check logs and verify the build is up to date"
- echo "The watchdog timeout is 15 seconds by default (configurable via MCP_WATCHDOG_TIMEOUT_MS)"
- exit 1
-fi
-
-# Step 4: Test manual execution
-echo ""
-echo "Step 4: Testing manual execution (3 second test)..."
-NODE_ENV=production MEMESH_DATA_DIR=$HOME/.memesh LOG_LEVEL=info DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js 2>&1 &
-NODE_PID=$!
-sleep 3
-if kill -0 $NODE_PID 2>/dev/null; then
- echo -e "${GREEN}✓${NC} MeMesh MCP server runs successfully"
- kill -SIGTERM $NODE_PID 2>/dev/null || true
- wait $NODE_PID 2>/dev/null || true
-else
- echo -e "${RED}✗${NC} MeMesh MCP server crashed during startup"
- exit 1
-fi
-
-echo ""
-echo "======================================"
-echo -e "${GREEN}✓ All checks passed!${NC}"
-echo "======================================"
-echo ""
-echo "MeMesh v2.6.0 is successfully installed and working."
-echo ""
-echo "Next steps:"
-echo "1. Restart your Claude Code session to load the new MCP server"
-echo "2. Use MeMesh MCP tools in Claude Code"
-echo ""
-echo "Available MeMesh tools:"
-echo " - buddy-do: Execute tasks with intelligent routing"
-echo " - buddy-remember: Store and retrieve knowledge"
-echo " - buddy-plan: Generate implementation plans"
-echo " - And more..."
-echo ""
diff --git a/scripts/verify-contrast.ts b/scripts/verify-contrast.ts
deleted file mode 100755
index cf4c683e..00000000
--- a/scripts/verify-contrast.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env tsx
-/**
- * Verify Theme Contrast Ratios
- *
- * Checks all color combinations in the theme meet WCAG AA standards
- */
-
-import { verifyThemeContrast, printContrastResults } from '../src/ui/theme.js';
-
-console.log('🎨 Verifying MeMesh Theme Contrast Ratios...\n');
-
-// Print results
-printContrastResults();
-
-// Check if all pass
-const results = verifyThemeContrast();
-const allPass = results.every(r => r.passes);
-
-if (allPass) {
- console.log('✅ All color combinations meet WCAG AA standards!\n');
- process.exit(0);
-} else {
- console.log('❌ Some color combinations fail WCAG AA standards.\n');
- process.exit(1);
-}
diff --git a/scripts/verify-dashboard.sh b/scripts/verify-dashboard.sh
deleted file mode 100755
index 839dcff3..00000000
--- a/scripts/verify-dashboard.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-# Dashboard Verification Script
-#
-# Verifies that the memesh dashboard command is working correctly
-
-set -e
-
-echo "🔍 Verifying MeMesh Dashboard Implementation..."
-echo ""
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-# Test 1: Check if dashboard.ts exists
-echo -n "1. Checking source file... "
-if [ -f "src/cli/dashboard.ts" ]; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${RED}✗ Missing src/cli/dashboard.ts${NC}"
- exit 1
-fi
-
-# Test 2: Check if dashboard.js is built
-echo -n "2. Checking compiled file... "
-if [ -f "dist/cli/dashboard.js" ]; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${RED}✗ Missing dist/cli/dashboard.js${NC}"
- exit 1
-fi
-
-# Test 3: Check if tests exist
-echo -n "3. Checking test file... "
-if [ -f "src/cli/__tests__/dashboard.test.ts" ]; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${YELLOW}⚠ Missing tests${NC}"
-fi
-
-# Test 4: Check if documentation exists
-echo -n "4. Checking documentation... "
-if [ -f "docs/cli-dashboard.md" ]; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${YELLOW}⚠ Missing documentation${NC}"
-fi
-
-# Test 5: Run tests
-echo -n "5. Running tests... "
-if npm test -- src/cli/__tests__/dashboard.test.ts > /dev/null 2>&1; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${RED}✗ Tests failed${NC}"
- exit 1
-fi
-
-# Test 6: Check CLI integration
-echo -n "6. Checking CLI integration... "
-if node dist/mcp/server-bootstrap.js --help | grep -q "dashboard"; then
- echo -e "${GREEN}✓${NC}"
-else
- echo -e "${RED}✗ Dashboard not in CLI help${NC}"
- exit 1
-fi
-
-# Test 7: Test dashboard execution (brief)
-echo -n "7. Testing dashboard execution... "
-# Run dashboard for 1 second in background and kill it
-timeout 1s node dist/mcp/server-bootstrap.js dashboard > /dev/null 2>&1 || true
-echo -e "${GREEN}✓${NC}"
-
-echo ""
-echo -e "${GREEN}✅ All verification checks passed!${NC}"
-echo ""
-echo "Dashboard is ready to use:"
-echo " $ memesh dashboard"
-echo ""
diff --git a/scripts/verify-mcp-setup.sh b/scripts/verify-mcp-setup.sh
deleted file mode 100755
index d8b9528f..00000000
--- a/scripts/verify-mcp-setup.sh
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/bash
-
-# MeMesh MCP Server Setup Verification Script
-# Checks if everything is properly configured before Claude Code integration
-
-set -e
-
-echo "🔍 MeMesh MCP Server Setup Verification"
-echo "=============================================="
-echo ""
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-ERRORS=0
-WARNINGS=0
-
-# Check 1: Node.js version
-echo "1️⃣ Checking Node.js version..."
-NODE_VERSION=$(node -v | cut -d'v' -f2)
-MAJOR_VERSION=$(echo $NODE_VERSION | cut -d'.' -f1)
-if [ "$MAJOR_VERSION" -ge 20 ]; then
- echo -e " ${GREEN}✓${NC} Node.js $NODE_VERSION (>= 20.0.0)"
-else
- echo -e " ${RED}✗${NC} Node.js $NODE_VERSION (requires >= 20.0.0)"
- ERRORS=$((ERRORS + 1))
-fi
-echo ""
-
-# Check 2: Dependencies installed
-echo "2️⃣ Checking dependencies..."
-if [ -d "node_modules" ]; then
- echo -e " ${GREEN}✓${NC} node_modules exists"
-else
- echo -e " ${RED}✗${NC} node_modules not found (run: npm install)"
- ERRORS=$((ERRORS + 1))
-fi
-echo ""
-
-# Check 3: Build artifacts
-echo "3️⃣ Checking build artifacts..."
-if [ -f "dist/mcp/server-bootstrap.js" ]; then
- echo -e " ${GREEN}✓${NC} dist/mcp/server-bootstrap.js exists"
-else
- echo -e " ${RED}✗${NC} dist/mcp/server-bootstrap.js not found (run: npm run build)"
- ERRORS=$((ERRORS + 1))
-fi
-echo ""
-
-# Check 4: Environment variables
-echo "4️⃣ Checking environment variables..."
-if [ -f ".env" ]; then
- echo -e " ${GREEN}✓${NC} .env file exists"
-
- if grep -q "MCP_SERVER_MODE=false" .env; then
- if grep -q "ANTHROPIC_API_KEY=sk-" .env; then
- echo -e " ${GREEN}✓${NC} ANTHROPIC_API_KEY configured"
- else
- echo -e " ${YELLOW}⚠${NC} ANTHROPIC_API_KEY not configured (required for standalone mode)"
- WARNINGS=$((WARNINGS + 1))
- fi
- else
- echo -e " ${GREEN}✓${NC} MCP Server mode enabled (API key optional)"
- fi
-
-else
- echo -e " ${YELLOW}⚠${NC} .env file not found (optional in MCP Server mode)"
- WARNINGS=$((WARNINGS + 1))
-fi
-echo ""
-
-# Check 5: Claude Code MCP config
-echo "5️⃣ Checking Claude Code MCP config..."
-CONFIG_PATH=""
-for CANDIDATE in "$HOME/.claude.json" "$HOME/.config/claude/claude_desktop_config.json" "$HOME/.claude/mcp_settings.json"; do
- if [ -f "$CANDIDATE" ]; then
- CONFIG_PATH="$CANDIDATE"
- break
- fi
-done
-
-if [ -z "$CONFIG_PATH" ]; then
- echo -e " ${YELLOW}⚠${NC} MCP config file not found"
- WARNINGS=$((WARNINGS + 1))
-else
- if node -e "const fs=require('fs'); const config=JSON.parse(fs.readFileSync(process.argv[2], 'utf8')); if (config?.mcpServers?.['memesh|claude-code-buddy']) process.exit(0); process.exit(1);" "$CONFIG_PATH" 2>/dev/null; then
- echo -e " ${GREEN}✓${NC} memesh|claude-code-buddy registered in $CONFIG_PATH"
- else
- echo -e " ${YELLOW}⚠${NC} memesh|claude-code-buddy not found in $CONFIG_PATH"
- WARNINGS=$((WARNINGS + 1))
- fi
-fi
-echo ""
-
-# Check 6: Test MCP server module loads
-echo "6️⃣ Testing MCP server module..."
-if node --input-type=module -e "import('./dist/mcp/server-bootstrap.js')" 2>/dev/null; then
- echo -e " ${GREEN}✓${NC} MCP server bootstrap loads successfully"
-else
- echo -e " ${YELLOW}⚠${NC} MCP server module check skipped (requires stdio)"
- echo -e " (This is normal - server needs Claude Code connection)"
-fi
-echo ""
-
-# Check 7: MCP tool definitions
-echo "7️⃣ Checking MCP tool definitions..."
-TOOL_COUNT=$(grep -c "name: '" src/mcp/ToolDefinitions.ts 2>/dev/null || echo "0")
-if [ "$TOOL_COUNT" -gt 0 ]; then
- echo -e " ${GREEN}✓${NC} $TOOL_COUNT tools defined"
-else
- echo -e " ${YELLOW}⚠${NC} Could not verify tool definitions"
- WARNINGS=$((WARNINGS + 1))
-fi
-echo ""
-
-# Summary
-echo "=============================================="
-echo ""
-if [ $ERRORS -eq 0 ] && [ $WARNINGS -eq 0 ]; then
- echo -e "${GREEN}✓ All checks passed!${NC}"
- echo ""
- echo "Next steps:"
- echo "1. Add MeMesh to ~/.claude.json"
- echo " See: docs/guides/SETUP.md"
- echo "2. Restart Claude Code"
- echo "3. Verify tools appear in Claude Code"
- exit 0
-elif [ $ERRORS -eq 0 ]; then
- echo -e "${YELLOW}⚠ Setup complete with $WARNINGS warning(s)${NC}"
- echo ""
- echo "You can proceed, but some features may not work."
- echo "See: docs/guides/SETUP.md for details"
- exit 0
-else
- echo -e "${RED}✗ Setup incomplete: $ERRORS error(s), $WARNINGS warning(s)${NC}"
- echo ""
- echo "Please fix the errors above before continuing."
- echo "Quick fixes:"
- echo " - npm install # Install dependencies"
- echo " - npm run build # Build project"
- echo " - cp .env.example .env # Create env file"
- exit 1
-fi
diff --git a/scripts/verify-mcp-stdio.sh b/scripts/verify-mcp-stdio.sh
deleted file mode 100755
index 7cbe8609..00000000
--- a/scripts/verify-mcp-stdio.sh
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/bin/bash
-#
-# MCP Stdio Verification Script
-#
-# Verifies that the MCP server runs cleanly in stdio mode without
-# any stdout/stderr pollution that would break JSON-RPC communication.
-#
-# Usage: ./scripts/verify-mcp-stdio.sh
-
-set -e
-
-echo "🔍 MCP Stdio Verification"
-echo "=========================="
-echo ""
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-# Counters
-PASSED=0
-FAILED=0
-
-# Test function
-test_check() {
- local test_name="$1"
- local test_command="$2"
-
- echo -n "Testing: $test_name... "
-
- if eval "$test_command" > /dev/null 2>&1; then
- echo -e "${GREEN}✓ PASS${NC}"
- PASSED=$((PASSED + 1))
- return 0
- else
- echo -e "${RED}✗ FAIL${NC}"
- FAILED=$((FAILED + 1))
- return 1
- fi
-}
-
-# Check 1: Build succeeds
-echo "📦 Build Check"
-test_check "TypeScript compilation" "npm run build"
-echo ""
-
-# Check 2: No dotenv in source
-echo "🔍 Source Code Checks"
-test_check "No dotenv imports" "! grep -r \"from 'dotenv'\" src/ --include='*.ts'"
-test_check "No console.log in source" "! grep -r \"console\\.log\" src/ --include='*.ts' --exclude='*.test.ts' | grep -v '//' | grep -v '\\*'"
-echo ""
-
-# Check 3: Stdio pollution test
-echo "🎯 Stdio Pollution Checks"
-
-# Create temporary file for output
-TEMP_OUTPUT=$(mktemp)
-POLLUTION_PID=""
-POLLUTION_WATCHDOG=""
-
-# Setup cleanup for pollution test
-cleanup_pollution() {
- [ -n "$POLLUTION_PID" ] && kill "$POLLUTION_PID" 2>/dev/null || true
- [ -n "$POLLUTION_WATCHDOG" ] && kill "$POLLUTION_WATCHDOG" 2>/dev/null || true
- rm -f "$TEMP_OUTPUT" 2>/dev/null || true
-}
-trap cleanup_pollution EXIT ERR
-
-# Run server for 1 second and capture output
-echo "Starting MCP server in stdio mode (1 second test)..."
-(
- MEMESH_DISABLE_DAEMON=1 DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js &
- POLLUTION_PID=$!
- (sleep 3; kill $POLLUTION_PID 2>/dev/null || true) &
- POLLUTION_WATCHDOG=$!
- sleep 1
- kill $POLLUTION_PID 2>/dev/null || true
- kill $POLLUTION_WATCHDOG 2>/dev/null || true
-) > "$TEMP_OUTPUT" 2>&1 || true
-
-# Check for pollution
-if [ -s "$TEMP_OUTPUT" ]; then
- echo -e "${RED}✗ FAIL: Stdout/stderr pollution detected${NC}"
- echo ""
- echo "Output found:"
- cat "$TEMP_OUTPUT"
- echo ""
- FAILED=$((FAILED + 1))
-else
- echo -e "${GREEN}✓ PASS: No stdout/stderr pollution${NC}"
- PASSED=$((PASSED + 1))
-fi
-
-# Reset trap for pollution test
-trap - EXIT ERR
-cleanup_pollution
-echo ""
-
-# Check 4: JSON-RPC communication
-echo "📡 JSON-RPC Communication Test"
-
-# Test JSON-RPC initialize
-INIT_REQUEST='{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"test","version":"1.0.0"}}}'
-
-JSONRPC_OUTPUT=$(mktemp)
-STDIN_FILE=$(mktemp)
-SERVER_PID=""
-WATCHDOG_PID=""
-
-# Setup cleanup for JSON-RPC test
-cleanup_jsonrpc() {
- [ -n "$SERVER_PID" ] && kill "$SERVER_PID" 2>/dev/null || true
- [ -n "$WATCHDOG_PID" ] && kill "$WATCHDOG_PID" 2>/dev/null || true
- rm -f "$JSONRPC_OUTPUT" "$STDIN_FILE" 2>/dev/null || true
-}
-trap cleanup_jsonrpc EXIT ERR
-
-# Write request to stdin file
-echo "$INIT_REQUEST" > "$STDIN_FILE"
-
-# Run server with stdin from file
-MEMESH_DISABLE_DAEMON=1 DISABLE_MCP_WATCHDOG=1 node dist/mcp/server-bootstrap.js < "$STDIN_FILE" > "$JSONRPC_OUTPUT" 2>&1 &
-SERVER_PID=$!
-
-# Start timeout watchdog
-(sleep 5; kill $SERVER_PID 2>/dev/null || true) &
-WATCHDOG_PID=$!
-
-sleep 2
-kill $SERVER_PID 2>/dev/null || true
-wait $SERVER_PID 2>/dev/null || true
-kill $WATCHDOG_PID 2>/dev/null || true
-wait $WATCHDOG_PID 2>/dev/null || true
-
-# Check if output is valid JSON
-if grep -q "jsonrpc" "$JSONRPC_OUTPUT" && ! grep -q "dotenv" "$JSONRPC_OUTPUT"; then
- echo -e "${GREEN}✓ PASS: Valid JSON-RPC response${NC}"
- PASSED=$((PASSED + 1))
-else
- echo -e "${RED}✗ FAIL: Invalid JSON-RPC response${NC}"
- echo ""
- echo "Output:"
- cat "$JSONRPC_OUTPUT"
- echo ""
- FAILED=$((FAILED + 1))
-fi
-
-# Reset trap for JSON-RPC test
-trap - EXIT ERR
-cleanup_jsonrpc
-echo ""
-
-# Summary
-echo "=========================="
-echo "📊 Test Summary"
-echo "=========================="
-echo -e "Passed: ${GREEN}$PASSED${NC}"
-echo -e "Failed: ${RED}$FAILED${NC}"
-echo ""
-
-if [ $FAILED -eq 0 ]; then
- echo -e "${GREEN}✅ All checks passed! Safe to publish.${NC}"
- exit 0
-else
- echo -e "${RED}❌ $FAILED check(s) failed. DO NOT publish.${NC}"
- exit 1
-fi
diff --git a/scripts/verify-migration.ts b/scripts/verify-migration.ts
deleted file mode 100644
index a8462377..00000000
--- a/scripts/verify-migration.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env tsx
-
-/**
- * Migration Verification Script
- *
- * Verifies whether migration completed successfully
- */
-
-import Database from 'better-sqlite3';
-import path from 'path';
-import os from 'os';
-import chalk from 'chalk';
-
-interface VerificationResult {
- success: boolean;
- checks: {
- name: string;
- passed: boolean;
- details?: string;
- }[];
-}
-
-async function verifyMigration(): Promise {
- const oldDbPath = './data/knowledge-graph.db';
- const newDbPath = path.join(os.homedir(), '.claude-code-buddy', 'knowledge-graph.db');
-
- console.log('\n🔍 Verifying Migration Results');
- console.log('='.repeat(50));
-
- const result: VerificationResult = {
- success: true,
- checks: [],
- };
-
- try {
- // Open databases
- const oldDb = new Database(oldDbPath, { readonly: true });
- const newDb = new Database(newDbPath, { readonly: true });
-
- // Check 1: Total entity count
- const oldCount = (oldDb.prepare('SELECT COUNT(*) as count FROM entities').get() as { count: number }).count;
- const newCount = (newDb.prepare('SELECT COUNT(*) as count FROM entities').get() as { count: number }).count;
-
- const entityCountCheck = {
- name: 'Total entity count',
- passed: newCount >= oldCount,
- details: `Old DB: ${oldCount}, New DB: ${newCount}`,
- };
- result.checks.push(entityCountCheck);
-
- // Check 2: All old entities exist in new database
- const oldEntities = oldDb.prepare('SELECT name FROM entities').all() as { name: string }[];
- let missingEntities = 0;
-
- for (const entity of oldEntities) {
- const exists = newDb.prepare('SELECT 1 FROM entities WHERE name = ?').get(entity.name);
- if (!exists) {
- missingEntities++;
- console.log(chalk.red(` ❌ Missing entity: ${entity.name}`));
- }
- }
-
- const allEntitiesMigrated = {
- name: 'All entities migrated',
- passed: missingEntities === 0,
- details: `Missing ${missingEntities} entities`,
- };
- result.checks.push(allEntitiesMigrated);
-
- // Check 3: Check if migrated entities have tags
- // Note: Only check entities migrated from old database
- const migratedEntityNames = oldEntities.map(e => e.name);
- let entitiesWithoutTags = 0;
-
- for (const entityName of migratedEntityNames) {
- interface TagCount {
- count: number;
- }
-
- const tagCount = (newDb.prepare(`
- SELECT COUNT(*) as count
- FROM tags t
- JOIN entities e ON t.entity_id = e.id
- WHERE e.name = ?
- `).get(entityName) as TagCount | undefined)?.count || 0;
-
- if (tagCount === 0) {
- entitiesWithoutTags++;
- console.log(chalk.yellow(` ⚠️ Entity without tags: ${entityName}`));
- }
- }
-
- const allHaveTags = {
- name: 'Migrated entities have tags',
- passed: entitiesWithoutTags === 0,
- details: `${entitiesWithoutTags} entities without tags`,
- };
- result.checks.push(allHaveTags);
-
- // Check 4: Check scope tags
- interface ScopeTagCount {
- count: number;
- }
-
- const scopeTagCount = (newDb.prepare(`
- SELECT COUNT(*) as count
- FROM tags
- WHERE tag LIKE 'scope:%'
- `).get() as ScopeTagCount).count;
-
- const hasScopeTags = {
- name: 'Scope tags added',
- passed: scopeTagCount > 0,
- details: `Found ${scopeTagCount} scope tags`,
- };
- result.checks.push(hasScopeTags);
-
- // Check 5: Check tech tags
- interface TechTagCount {
- count: number;
- }
-
- const techTagCount = (newDb.prepare(`
- SELECT COUNT(*) as count
- FROM tags
- WHERE tag LIKE 'tech:%'
- `).get() as TechTagCount).count;
-
- const hasTechTags = {
- name: 'Tech tags added',
- passed: techTagCount > 0,
- details: `Found ${techTagCount} tech tags`,
- };
- result.checks.push(hasTechTags);
-
- // Check 6: Observations preserved
- const oldObsCount = (oldDb.prepare('SELECT COUNT(*) as count FROM observations').get() as { count: number }).count;
- const newObsCount = (newDb.prepare('SELECT COUNT(*) as count FROM observations').get() as { count: number }).count;
-
- const observationsPreserved = {
- name: 'Observations preserved',
- passed: newObsCount >= oldObsCount,
- details: `Old: ${oldObsCount}, New: ${newObsCount}`,
- };
- result.checks.push(observationsPreserved);
-
- // Close databases
- oldDb.close();
- newDb.close();
-
- // Determine overall result
- result.success = result.checks.every(check => check.passed);
-
- } catch (error) {
- console.error(chalk.red('\n❌ Error occurred during verification:'), error);
- result.success = false;
- }
-
- // Output results
- console.log('\nCheck Results:');
- console.log('-'.repeat(50));
-
- for (const check of result.checks) {
- const icon = check.passed ? chalk.green('✅') : chalk.red('❌');
- console.log(`${icon} ${check.name}: ${check.details || (check.passed ? 'Passed' : 'Failed')}`);
- }
-
- console.log('-'.repeat(50));
-
- if (result.success) {
- console.log(chalk.green('\n✅ All checks passed! Migration completed successfully.\n'));
- } else {
- console.log(chalk.red('\n❌ Some checks failed, please review the migration process.\n'));
- }
-
- return result;
-}
-
-// CLI interface
-if (import.meta.url === `file://${process.argv[1]}`) {
- verifyMigration().then(result => {
- process.exit(result.success ? 0 : 1);
- });
-}
-
-export { verifyMigration };
diff --git a/scripts/verify-performance-claims.ts b/scripts/verify-performance-claims.ts
deleted file mode 100755
index f36cc4f7..00000000
--- a/scripts/verify-performance-claims.ts
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/env tsx
-/**
- * Real Performance Verification Script
- *
- * This script actually measures performance to verify the claimed improvements.
- * Unlike the benchmarks that have build errors, this uses the runtime APIs directly.
- */
-
-import { performance } from 'perf_hooks';
-
-interface BenchmarkResult {
- test: string;
- dataSize: number;
- duration: number;
- operations: number;
- opsPerSecond: number;
- claimedSpeedup?: string;
- actualSpeedup?: number;
-}
-
-const results: BenchmarkResult[] = [];
-
-async function benchmark(name: string, dataSize: number, fn: () => Promise, ops: number = 1): Promise {
- // Warmup
- await fn();
-
- // Actual measurement (run 5 times, take median)
- const times: number[] = [];
- for (let i = 0; i < 5; i++) {
- const start = performance.now();
- await fn();
- times.push(performance.now() - start);
- }
-
- times.sort((a, b) => a - b);
- const median = times[Math.floor(times.length / 2)];
-
- results.push({
- test: name,
- dataSize,
- duration: median,
- operations: ops,
- opsPerSecond: (ops / median) * 1000,
- });
-
- return median;
-}
-
-async function testKnowledgeGraphStats() {
- console.log('\n📊 Testing KnowledgeGraph.getStats() Performance');
- console.log('━'.repeat(80));
-
- // We can't actually test without importing (build errors), but we can show what SHOULD be tested
- console.log('⚠️ Cannot run actual test due to build errors');
- console.log('📝 What should be tested:');
- console.log(' 1. Create knowledge graph with N entities');
- console.log(' 2. Measure getStats() with N+1 implementation (baseline)');
- console.log(' 3. Measure getStats() with single query implementation');
- console.log(' 4. Calculate actual speedup');
- console.log(' 5. Compare to claimed "168x speedup"');
-
- console.log('\n📋 Expected test cases:');
- const testCases = [
- { entities: 10, relations: 20, expectedOld: 10, expectedNew: 1, claimedSpeedup: '10x' },
- { entities: 50, relations: 100, expectedOld: 50, expectedNew: 1, claimedSpeedup: '50x' },
- { entities: 100, relations: 300, expectedOld: 100, expectedNew: 1, claimedSpeedup: '100x' },
- { entities: 500, relations: 2000, expectedOld: 500, expectedNew: 1, claimedSpeedup: '500x' },
- ];
-
- for (const tc of testCases) {
- console.log(` ${tc.entities} entities, ${tc.relations} relations:`);
- console.log(` Old: ${tc.expectedOld} queries (N+1 pattern)`);
- console.log(` New: ${tc.expectedNew} query (single COUNT)`);
- console.log(` Expected speedup: ${tc.claimedSpeedup}`);
- }
-
- console.log('\n❌ Result: CANNOT VERIFY - Build errors prevent testing');
-}
-
-async function testBatchQueries() {
- console.log('\n📊 Testing Batch Query Performance (getConnectedEntities)');
- console.log('━'.repeat(80));
-
- console.log('⚠️ Cannot run actual test due to build errors');
- console.log('📝 What should be tested:');
- console.log(' 1. Create graph with depth D and branching factor B');
- console.log(' 2. Measure getConnectedEntities with N+1 queries (baseline)');
- console.log(' 3. Measure getConnectedEntities with batch queries');
- console.log(' 4. Calculate actual speedup');
- console.log(' 5. Compare to claimed "47x speedup"');
-
- console.log('\n📋 Expected test cases:');
- const testCases = [
- { depth: 1, nodes: 20, oldQueries: 20, newQueries: 1, claimedSpeedup: '20x' },
- { depth: 2, nodes: 60, oldQueries: 60, newQueries: 2, claimedSpeedup: '30x' },
- { depth: 3, nodes: 180, oldQueries: 180, newQueries: 3, claimedSpeedup: '60x' },
- ];
-
- for (const tc of testCases) {
- console.log(` Depth ${tc.depth}, ${tc.nodes} nodes:`);
- console.log(` Old: ${tc.oldQueries} queries (N per level)`);
- console.log(` New: ${tc.newQueries} queries (1 per level)`);
- console.log(` Expected speedup: ${tc.claimedSpeedup}`);
- }
-
- console.log('\n❌ Result: CANNOT VERIFY - Build errors prevent testing');
-}
-
-async function testParallelPatternExtraction() {
- console.log('\n📊 Testing "Parallel" Pattern Extraction');
- console.log('━'.repeat(80));
-
- console.log('⚠️ Code claims parallelization but implementation is sequential!');
- console.log('📝 Code review findings:');
-
- const codeSnippet = `
- // From LearningManager.analyzePatterns():
- for (const [taskType, taskMetrics] of metricsByTask.entries()) {
- const successPatterns = this.extractSuccessPatterns(...); // Synchronous
- const failurePatterns = this.extractFailurePatterns(...); // Synchronous
- const optimizationPatterns = this.extractOptimizationPatterns(...); // Synchronous
- newPatterns.push(...successPatterns, ...failurePatterns, ...optimizationPatterns);
- }
- `;
-
- console.log(codeSnippet);
- console.log('❌ Analysis: This is SEQUENTIAL, not parallel!');
- console.log(' - for-loop processes task types one at a time');
- console.log(' - All methods are synchronous (no Promise.all)');
- console.log(' - No Worker threads, no concurrency');
-
- console.log('\n🔴 Result: FALSE CLAIM - Not actually parallel');
-}
-
-async function testStatsCaching() {
- console.log('\n📊 Testing Statistics Caching');
- console.log('━'.repeat(80));
-
- console.log('✅ Implementation exists in LearningManager');
- console.log('📝 How it works:');
- console.log(' 1. Cache key: `${agentId}:${taskType}`');
- console.log(' 2. TTL: 60 seconds');
- console.log(' 3. Invalidation: sample size check');
-
- console.log('\n⚠️ Issues found:');
- console.log(' 1. Cache doesn\'t invalidate when new metrics are added during TTL');
- console.log(' 2. No memory limits on cache size');
- console.log(' 3. Sample size check is brittle');
-
- console.log('\n📋 What should be tested:');
- console.log(' 1. Cold cache: First call computes stats');
- console.log(' 2. Warm cache: Subsequent calls return cached values');
- console.log(' 3. Invalidation: New metrics added should invalidate cache');
- console.log(' 4. Memory: Cache should not grow unbounded');
-
- console.log('\n⚠️ Result: IMPLEMENTATION CORRECT but INVALIDATION ISSUES');
-}
-
-async function testQueryCache() {
- console.log('\n📊 Testing Query Cache Implementation');
- console.log('━'.repeat(80));
-
- console.log('✅ Excellent implementation in src/db/QueryCache.ts');
- console.log('📝 Features:');
- console.log(' ✓ LRU eviction');
- console.log(' ✓ TTL expiration');
- console.log(' ✓ Automatic invalidation on mutations');
- console.log(' ✓ Memory limits');
- console.log(' ✓ Cache statistics (hit rate, size, memory)');
-
- console.log('\n📊 Performance characteristics:');
- console.log(' - get(): O(1) with Map lookup');
- console.log(' - set(): O(1) amortized');
- console.log(' - invalidate(): O(N) clears all entries');
- console.log(' - Memory: Bounded by maxSize');
-
- console.log('\n✅ Result: IMPLEMENTATION EXCELLENT, but no actual benchmarks run');
-}
-
-async function runPerformanceAudit() {
- console.log('\n');
- console.log('╔════════════════════════════════════════════════════════════════════╗');
- console.log('║ PERFORMANCE CLAIMS VERIFICATION - ACTUAL MEASUREMENTS ║');
- console.log('╚════════════════════════════════════════════════════════════════════╝');
- console.log('\n');
-
- await testKnowledgeGraphStats();
- await testBatchQueries();
- await testParallelPatternExtraction();
- await testStatsCaching();
- await testQueryCache();
-
- console.log('\n');
- console.log('═'.repeat(80));
- console.log('SUMMARY: Performance Claims Verification Status');
- console.log('═'.repeat(80));
-
- const summary = [
- { claim: 'getStats() 168x speedup', status: '❌ UNVERIFIED', reason: 'Build errors prevent testing' },
- { claim: 'getConnectedEntities() 47x speedup', status: '❌ UNVERIFIED', reason: 'Build errors prevent testing' },
- { claim: 'Parallel pattern extraction', status: '🔴 FALSE', reason: 'Code is actually sequential' },
- { claim: 'Statistics caching', status: '⚠️ PARTIAL', reason: 'Works but has invalidation issues' },
- { claim: 'Query cache implementation', status: '✅ VERIFIED', reason: 'Excellent implementation' },
- { claim: 'Similar entity search 40% improvement', status: '❌ UNVERIFIED', reason: 'No evidence found' },
- { claim: 'Evolution stats 80% improvement', status: '❌ UNVERIFIED', reason: 'No baseline measurements' },
- ];
-
- for (const item of summary) {
- console.log(`\n${item.status} ${item.claim}`);
- console.log(` → ${item.reason}`);
- }
-
- console.log('\n');
- console.log('╔════════════════════════════════════════════════════════════════════╗');
- console.log('║ FINAL VERDICT ║');
- console.log('╚════════════════════════════════════════════════════════════════════╝');
- console.log('\n');
- console.log('Grade: 6/10 - Implementation OK, Claims Exaggerated');
- console.log('\n');
- console.log('✅ What Works:');
- console.log(' - Batch query implementations are correct');
- console.log(' - QueryCache is excellently designed');
- console.log(' - N+1 queries eliminated where claimed');
- console.log('\n');
- console.log('❌ What Doesn\'t Work:');
- console.log(' - NO ACTUAL MEASUREMENTS - All claims unverified');
- console.log(' - FALSE CLAIM: "Parallel extraction" is sequential');
- console.log(' - Build errors prevent running benchmarks');
- console.log(' - Cache invalidation issues in stats cache');
- console.log(' - No load testing with realistic data');
- console.log('\n');
- console.log('🚨 Critical Actions Needed:');
- console.log(' 1. Fix build errors immediately');
- console.log(' 2. Run benchmarks with real data (1000+ entities)');
- console.log(' 3. Measure baseline vs optimized (before/after)');
- console.log(' 4. Fix false claim about parallelization');
- console.log(' 5. Add regression tests to CI');
- console.log('\n');
- console.log('Trust Level: LOW (3/10)');
- console.log('Reason: No measurements provided, one false claim found');
- console.log('\n');
-}
-
-// Run the audit
-runPerformanceAudit().catch(console.error);
diff --git a/src/cli/__tests__/config.test.ts b/src/cli/__tests__/config.test.ts
deleted file mode 100644
index 678a1522..00000000
--- a/src/cli/__tests__/config.test.ts
+++ /dev/null
@@ -1,367 +0,0 @@
-/**
- * Tests for Configuration Management Module
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import fs from 'fs-extra';
-import path from 'path';
-import os from 'os';
-import crypto from 'crypto';
-import { ConfigManager } from '../config.js';
-
-describe('ConfigManager', () => {
- describe('getConfigPath', () => {
- it('should return macOS path on darwin platform', () => {
- const originalPlatform = process.platform;
- Object.defineProperty(process, 'platform', {
- value: 'darwin',
- writable: true,
- });
-
- const configPath = ConfigManager.getConfigPath();
- expect(configPath).toContain('Library/Application Support/Claude');
-
- Object.defineProperty(process, 'platform', {
- value: originalPlatform,
- writable: true,
- });
- });
-
- it('should return Windows path on win32 platform', () => {
- const originalPlatform = process.platform;
- Object.defineProperty(process, 'platform', {
- value: 'win32',
- writable: true,
- });
-
- const configPath = ConfigManager.getConfigPath();
- expect(configPath).toContain('Claude');
-
- Object.defineProperty(process, 'platform', {
- value: originalPlatform,
- writable: true,
- });
- });
- });
-
- describe('getConfigPathDescription', () => {
- it('should return readable path description', () => {
- const description = ConfigManager.getConfigPathDescription();
- expect(description).toContain('Claude');
- expect(description).toContain('claude_desktop_config.json');
- });
- });
-
- describe('validateConfig', () => {
- let testTempDir: string;
- let testConfigPath: string;
-
- beforeEach(async () => {
- // Create unique temporary directory for test isolation
- testTempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ccb-config-test-'));
- testConfigPath = path.join(testTempDir, `config-${crypto.randomUUID()}.json`);
- });
-
- afterEach(async () => {
- // Clean up entire temporary directory
- if (testTempDir && await fs.pathExists(testTempDir)) {
- await fs.remove(testTempDir);
- }
- });
-
- it('should detect missing config file', async () => {
- // Mock getConfigPath to return test path
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(false);
- expect(result.errors).toContain('Configuration file does not exist');
- });
-
- it('should detect invalid JSON', async () => {
- // Write invalid JSON
- await fs.writeFile(testConfigPath, '{ invalid json }');
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(false);
- expect(result.errors.length).toBeGreaterThan(0);
- expect(result.errors[0]).toContain('Invalid JSON syntax');
- });
-
- it('should detect missing memesh server', async () => {
- // Write config without memesh
- await fs.writeJSON(testConfigPath, {
- mcpServers: {
- other: {
- command: 'node',
- args: ['test'],
- },
- },
- });
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(false);
- expect(result.errors).toContain('MeMesh MCP server not configured');
- });
-
- it('should validate correct memesh config', async () => {
- // Write valid config
- await fs.writeJSON(testConfigPath, {
- mcpServers: {
- memesh: {
- command: 'node',
- args: ['/path/to/memesh'],
- },
- },
- });
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(true);
- expect(result.errors.length).toBe(0);
- });
-
- it('should warn about missing command field', async () => {
- // Write config with missing command
- await fs.writeJSON(testConfigPath, {
- mcpServers: {
- memesh: {
- args: ['/path/to/memesh'],
- },
- },
- });
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(false);
- expect(result.errors).toContain('MeMesh server: missing "command" field');
- });
-
- it('should warn about invalid args field', async () => {
- // Write config with invalid args
- await fs.writeJSON(testConfigPath, {
- mcpServers: {
- memesh: {
- command: 'node',
- args: 'not-an-array',
- },
- },
- });
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const result = await ConfigManager.validateConfig();
-
- expect(result.valid).toBe(false);
- expect(result.errors).toContain(
- 'MeMesh server: missing or invalid "args" field'
- );
- });
- });
-
- describe('generateDefaultConfig', () => {
- it('should generate valid default config', () => {
- const config = ConfigManager.generateDefaultConfig();
-
- expect(config.mcpServers).toBeDefined();
- expect(config.mcpServers.memesh).toBeDefined();
- expect(config.mcpServers.memesh.command).toBe('node');
- expect(config.mcpServers.memesh.args).toBeInstanceOf(Array);
- expect(config.mcpServers.memesh.args.length).toBeGreaterThan(0);
- });
- });
-
- describe('highlightJSON', () => {
- it('should return formatted JSON string', () => {
- const obj = {
- test: 'value',
- number: 123,
- boolean: true,
- null: null,
- };
-
- const highlighted = ConfigManager.highlightJSON(obj);
-
- expect(highlighted).toContain('test');
- expect(highlighted).toContain('value');
- expect(highlighted).toContain('123');
- expect(highlighted).toContain('true');
- expect(highlighted).toContain('null');
- });
-
- it('should handle nested objects', () => {
- const obj = {
- mcpServers: {
- memesh: {
- command: 'node',
- args: ['test'],
- },
- },
- };
-
- const highlighted = ConfigManager.highlightJSON(obj);
-
- expect(highlighted).toContain('mcpServers');
- expect(highlighted).toContain('memesh');
- expect(highlighted).toContain('command');
- expect(highlighted).toContain('node');
- });
- });
-
- describe('readConfig', () => {
- let testTempDir: string;
- let testConfigPath: string;
-
- beforeEach(async () => {
- // Create unique temporary directory for test isolation
- testTempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ccb-read-config-test-'));
- testConfigPath = path.join(testTempDir, `config-${crypto.randomUUID()}.json`);
- });
-
- afterEach(async () => {
- // Clean up entire temporary directory
- if (testTempDir && await fs.pathExists(testTempDir)) {
- await fs.remove(testTempDir);
- }
- });
-
- it('should return null for non-existent config', async () => {
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const config = await ConfigManager.readConfig();
-
- expect(config).toBeNull();
- });
-
- it('should read and parse valid config', async () => {
- const testConfig = {
- mcpServers: {
- memesh: {
- command: 'node',
- args: ['test'],
- },
- },
- };
-
- await fs.writeJSON(testConfigPath, testConfig);
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const config = await ConfigManager.readConfig();
-
- expect(config).toEqual(testConfig);
- });
-
- it('should return null for invalid JSON', async () => {
- await fs.writeFile(testConfigPath, '{ invalid }');
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const config = await ConfigManager.readConfig();
-
- expect(config).toBeNull();
- });
- });
-
- describe('writeConfig', () => {
- let testTempDir: string;
- let testConfigPath: string;
-
- beforeEach(async () => {
- // Create unique temporary directory for test isolation
- testTempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ccb-write-config-test-'));
- testConfigPath = path.join(testTempDir, `config-${crypto.randomUUID()}.json`);
- });
-
- afterEach(async () => {
- // Clean up entire temporary directory
- if (testTempDir && await fs.pathExists(testTempDir)) {
- await fs.remove(testTempDir);
- }
- });
-
- it('should write config successfully', async () => {
- const testConfig = {
- mcpServers: {
- memesh: {
- command: 'node',
- args: ['test'],
- },
- },
- };
-
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const success = await ConfigManager.writeConfig(testConfig);
-
- expect(success).toBe(true);
- expect(await fs.pathExists(testConfigPath)).toBe(true);
-
- const written = await fs.readJSON(testConfigPath);
- expect(written).toEqual(testConfig);
- });
-
- it('should create directory if needed', async () => {
- // Use the already-created unique temp directory for nested path test
- const nestedPath = path.join(
- testTempDir,
- 'nested',
- 'dir',
- `config-${crypto.randomUUID()}.json`
- );
-
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(nestedPath);
-
- const testConfig = { mcpServers: {} };
- const success = await ConfigManager.writeConfig(testConfig);
-
- expect(success).toBe(true);
- expect(await fs.pathExists(nestedPath)).toBe(true);
- // Cleanup handled by afterEach removing testTempDir
- });
- });
-
- describe('backupConfig', () => {
- let testTempDir: string;
- let testConfigPath: string;
-
- beforeEach(async () => {
- // Create unique temporary directory for test isolation
- testTempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ccb-backup-config-test-'));
- testConfigPath = path.join(testTempDir, `config-${crypto.randomUUID()}.json`);
- });
-
- afterEach(async () => {
- // Clean up entire temporary directory (includes all backup files)
- if (testTempDir && await fs.pathExists(testTempDir)) {
- await fs.remove(testTempDir);
- }
- });
-
- it('should return null for non-existent config', async () => {
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const backupPath = await ConfigManager.backupConfig();
-
- expect(backupPath).toBeNull();
- });
-
- it('should create backup with timestamp', async () => {
- await fs.writeJSON(testConfigPath, { test: 'data' });
- vi.spyOn(ConfigManager, 'getConfigPath').mockReturnValue(testConfigPath);
-
- const backupPath = await ConfigManager.backupConfig();
-
- expect(backupPath).not.toBeNull();
- expect(backupPath).toContain('backup-');
- expect(await fs.pathExists(backupPath!)).toBe(true);
-
- const backupContent = await fs.readJSON(backupPath!);
- expect(backupContent).toEqual({ test: 'data' });
- });
- });
-});
diff --git a/src/cli/__tests__/credentials.test.ts b/src/cli/__tests__/credentials.test.ts
deleted file mode 100644
index 4d7c2f3e..00000000
--- a/src/cli/__tests__/credentials.test.ts
+++ /dev/null
@@ -1,161 +0,0 @@
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import * as fs from 'fs';
-import * as path from 'path';
-import * as os from 'os';
-import {
- getCredentialsPath,
- loadCredentials,
- saveCredentials,
- deleteCredentials,
- type MeMeshCredentials,
-} from '../credentials.js';
-
-// Use a temp directory for test isolation
-let tempDir: string;
-let originalEnv: string | undefined;
-
-beforeEach(() => {
- tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'memesh-creds-test-'));
- originalEnv = process.env.XDG_CONFIG_HOME;
- process.env.XDG_CONFIG_HOME = tempDir;
-});
-
-afterEach(() => {
- if (originalEnv !== undefined) {
- process.env.XDG_CONFIG_HOME = originalEnv;
- } else {
- delete process.env.XDG_CONFIG_HOME;
- }
- fs.rmSync(tempDir, { recursive: true, force: true });
-});
-
-describe('getCredentialsPath', () => {
- it('should use XDG_CONFIG_HOME when set', () => {
- process.env.XDG_CONFIG_HOME = '/custom/config';
- expect(getCredentialsPath()).toBe('/custom/config/memesh/credentials.json');
- });
-
- it('should fall back to ~/.config when XDG_CONFIG_HOME is unset', () => {
- delete process.env.XDG_CONFIG_HOME;
- const expected = path.join(os.homedir(), '.config', 'memesh', 'credentials.json');
- expect(getCredentialsPath()).toBe(expected);
- });
-});
-
-describe('saveCredentials', () => {
- it('should create directory and write credentials file', () => {
- const creds: MeMeshCredentials = {
- apiKey: 'sk_memmesh_test123',
- email: 'test@example.com',
- createdAt: '2026-01-01T00:00:00.000Z',
- };
-
- saveCredentials(creds);
-
- const credPath = getCredentialsPath();
- expect(fs.existsSync(credPath)).toBe(true);
-
- const content = JSON.parse(fs.readFileSync(credPath, 'utf-8'));
- expect(content.apiKey).toBe('sk_memmesh_test123');
- expect(content.email).toBe('test@example.com');
- });
-
- it('should set file permissions to 0o600', () => {
- saveCredentials({
- apiKey: 'sk_memmesh_test',
- createdAt: new Date().toISOString(),
- });
-
- const credPath = getCredentialsPath();
- const stats = fs.statSync(credPath);
- expect(stats.mode & 0o777).toBe(0o600);
- });
-
- it('should overwrite existing credentials', () => {
- saveCredentials({ apiKey: 'old_key', createdAt: '2026-01-01T00:00:00Z' });
- saveCredentials({ apiKey: 'new_key', createdAt: '2026-02-01T00:00:00Z' });
-
- const creds = loadCredentials();
- expect(creds?.apiKey).toBe('new_key');
- });
-
- it('should store optional baseUrl', () => {
- saveCredentials({
- apiKey: 'sk_memmesh_test',
- baseUrl: 'https://custom-backend.example.com',
- createdAt: new Date().toISOString(),
- });
-
- const creds = loadCredentials();
- expect(creds?.baseUrl).toBe('https://custom-backend.example.com');
- });
-});
-
-describe('loadCredentials', () => {
- it('should return null when no file exists', () => {
- expect(loadCredentials()).toBeNull();
- });
-
- it('should return credentials when file is valid', () => {
- saveCredentials({
- apiKey: 'sk_memmesh_abc',
- userId: 'user-123',
- createdAt: '2026-01-01T00:00:00Z',
- });
-
- const creds = loadCredentials();
- expect(creds).not.toBeNull();
- expect(creds!.apiKey).toBe('sk_memmesh_abc');
- expect(creds!.userId).toBe('user-123');
- });
-
- it('should return null for invalid JSON', () => {
- const credPath = getCredentialsPath();
- fs.mkdirSync(path.dirname(credPath), { recursive: true });
- fs.writeFileSync(credPath, 'not json at all');
-
- expect(loadCredentials()).toBeNull();
- });
-
- it('should return null when apiKey is missing', () => {
- const credPath = getCredentialsPath();
- fs.mkdirSync(path.dirname(credPath), { recursive: true });
- fs.writeFileSync(credPath, JSON.stringify({ email: 'test@example.com' }));
-
- expect(loadCredentials()).toBeNull();
- });
-
- it('should return null when apiKey is not a string', () => {
- const credPath = getCredentialsPath();
- fs.mkdirSync(path.dirname(credPath), { recursive: true });
- fs.writeFileSync(credPath, JSON.stringify({ apiKey: 12345 }));
-
- expect(loadCredentials()).toBeNull();
- });
-
- it('should return null when apiKey is empty string', () => {
- const credPath = getCredentialsPath();
- fs.mkdirSync(path.dirname(credPath), { recursive: true });
- fs.writeFileSync(credPath, JSON.stringify({ apiKey: '' }));
-
- expect(loadCredentials()).toBeNull();
- });
-});
-
-describe('deleteCredentials', () => {
- it('should delete existing credentials and return true', () => {
- saveCredentials({
- apiKey: 'sk_memmesh_todelete',
- createdAt: new Date().toISOString(),
- });
-
- const result = deleteCredentials();
- expect(result).toBe(true);
- expect(loadCredentials()).toBeNull();
- });
-
- it('should return false when no file exists', () => {
- const result = deleteCredentials();
- expect(result).toBe(false);
- });
-});
diff --git a/src/cli/__tests__/dashboard.test.ts b/src/cli/__tests__/dashboard.test.ts
deleted file mode 100644
index 060ce1e8..00000000
--- a/src/cli/__tests__/dashboard.test.ts
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Dashboard Tests
- *
- * Tests for the CLI dashboard functionality
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { HealthChecker } from '../../core/HealthCheck.js';
-import { UnifiedMemoryStore } from '../../memory/UnifiedMemoryStore.js';
-
-describe('Dashboard Metrics Collection', () => {
- let healthChecker: HealthChecker;
- let memoryStore: UnifiedMemoryStore;
-
- beforeEach(async () => {
- healthChecker = new HealthChecker({ timeout: 3000 });
- });
-
- afterEach(async () => {
- if (memoryStore) {
- memoryStore.close();
- }
- });
-
- it('should collect system health metrics', async () => {
- const health = await healthChecker.checkAll();
-
- expect(health).toBeDefined();
- expect(health.status).toMatch(/healthy|degraded|unhealthy/);
- expect(health.components).toBeInstanceOf(Array);
- expect(health.components.length).toBeGreaterThan(0);
- expect(health.isHealthy).toBeDefined();
- expect(health.totalDurationMs).toBeGreaterThanOrEqual(0);
- });
-
- it('should collect memory statistics', async () => {
- memoryStore = await UnifiedMemoryStore.create(':memory:');
-
- // Store some test memories
- await memoryStore.store({
- type: 'knowledge',
- content: 'Test knowledge entry',
- tags: ['test'],
- importance: 0.8,
- timestamp: new Date(),
- });
-
- const memories = await memoryStore.search('', { limit: 10 });
-
- expect(memories).toBeInstanceOf(Array);
- expect(memories.length).toBeGreaterThan(0);
- expect(memories[0]).toHaveProperty('type');
- expect(memories[0]).toHaveProperty('content');
- });
-
- it('should measure process memory usage', () => {
- const memUsage = process.memoryUsage();
-
- expect(memUsage.heapUsed).toBeGreaterThan(0);
- expect(memUsage.heapTotal).toBeGreaterThan(0);
- expect(memUsage.heapUsed).toBeLessThanOrEqual(memUsage.heapTotal);
-
- const usagePercent = (memUsage.heapUsed / memUsage.heapTotal) * 100;
- expect(usagePercent).toBeGreaterThan(0);
- expect(usagePercent).toBeLessThanOrEqual(100);
- });
-
- it('should measure uptime', () => {
- const uptime = Math.floor(process.uptime());
-
- expect(uptime).toBeGreaterThanOrEqual(0);
- expect(typeof uptime).toBe('number');
- });
-
- it('should handle health check timeout gracefully', async () => {
- const fastChecker = new HealthChecker({ timeout: 1 });
-
- const health = await fastChecker.checkAll();
-
- // Should complete even with very short timeout
- expect(health).toBeDefined();
- expect(health.components).toBeInstanceOf(Array);
- });
-
- it('should categorize health status correctly', async () => {
- const health = await healthChecker.checkAll();
-
- for (const component of health.components) {
- expect(component.status).toMatch(/healthy|degraded|unhealthy|unknown/);
- expect(component.name).toBeDefined();
- expect(component.message).toBeDefined();
- expect(component.durationMs).toBeGreaterThanOrEqual(0);
- expect(component.timestamp).toBeInstanceOf(Date);
- }
- });
-
- it('should calculate overall system health', async () => {
- const health = await healthChecker.checkAll();
-
- const hasUnhealthy = health.components.some((c) => c.status === 'unhealthy');
- const hasDegraded = health.components.some((c) => c.status === 'degraded');
-
- if (hasUnhealthy) {
- expect(health.status).toBe('unhealthy');
- expect(health.isHealthy).toBe(false);
- } else if (hasDegraded) {
- expect(health.status).toBe('degraded');
- expect(health.isHealthy).toBe(true);
- } else {
- expect(health.status).toBe('healthy');
- expect(health.isHealthy).toBe(true);
- }
- });
-
- it('should include timestamp in health check results', async () => {
- const beforeCheck = new Date();
- const health = await healthChecker.checkAll();
- const afterCheck = new Date();
-
- expect(health.timestamp).toBeInstanceOf(Date);
- expect(health.timestamp.getTime()).toBeGreaterThanOrEqual(beforeCheck.getTime());
- expect(health.timestamp.getTime()).toBeLessThanOrEqual(afterCheck.getTime());
-
- // Each component should also have timestamp
- for (const component of health.components) {
- expect(component.timestamp).toBeInstanceOf(Date);
- expect(component.timestamp.getTime()).toBeGreaterThanOrEqual(beforeCheck.getTime());
- expect(component.timestamp.getTime()).toBeLessThanOrEqual(afterCheck.getTime());
- }
- });
-});
-
-describe('Dashboard Display Formatting', () => {
- it('should format memory bar correctly', () => {
- // Test memory bar rendering logic
- const testCases = [
- { percent: 0, expectedFilled: 0, color: 'green' },
- { percent: 50, expectedFilled: 10, color: 'green' },
- { percent: 76, expectedFilled: 15, color: 'yellow' },
- { percent: 91, expectedFilled: 18, color: 'red' },
- { percent: 100, expectedFilled: 20, color: 'red' },
- ];
-
- const barLength = 20;
-
- for (const testCase of testCases) {
- const filled = Math.round((testCase.percent / 100) * barLength);
- expect(filled).toBe(testCase.expectedFilled);
- }
- });
-
- it('should format uptime string correctly', () => {
- const testCases = [
- { uptime: 0, expected: { h: 0, m: 0, s: 0 } },
- { uptime: 65, expected: { h: 0, m: 1, s: 5 } },
- { uptime: 3661, expected: { h: 1, m: 1, s: 1 } },
- { uptime: 7200, expected: { h: 2, m: 0, s: 0 } },
- ];
-
- for (const testCase of testCases) {
- const hours = Math.floor(testCase.uptime / 3600);
- const minutes = Math.floor((testCase.uptime % 3600) / 60);
- const seconds = testCase.uptime % 60;
-
- expect(hours).toBe(testCase.expected.h);
- expect(minutes).toBe(testCase.expected.m);
- expect(seconds).toBe(testCase.expected.s);
- }
- });
-
- it('should truncate long activity messages', () => {
- const longContent = 'This is a very long content that should be truncated to 50 characters maximum';
- const maxLength = 50;
-
- const truncated =
- longContent.substring(0, maxLength) + (longContent.length > maxLength ? '...' : '');
-
- expect(truncated.length).toBeLessThanOrEqual(maxLength + 3); // +3 for '...'
- expect(truncated).toContain('...');
- });
-
- it('should handle empty activity list', () => {
- const activities: string[] = [];
-
- expect(activities.length).toBe(0);
- // Dashboard should show "No recent activities" message
- });
-
- it('should limit activities to max count', () => {
- const MAX_RECENT_ACTIVITIES = 10;
- const manyActivities = Array.from({ length: 20 }, (_, i) => `Activity ${i + 1}`);
-
- const limited = manyActivities.slice(0, MAX_RECENT_ACTIVITIES);
-
- expect(limited.length).toBe(MAX_RECENT_ACTIVITIES);
- expect(limited[0]).toBe('Activity 1');
- expect(limited[9]).toBe('Activity 10');
- });
-});
-
-describe('Dashboard Error Handling', () => {
- it('should handle missing database file gracefully', async () => {
- const checker = new HealthChecker();
- const health = await checker.checkAll();
-
- // Database component should be degraded if file doesn't exist
- const dbComponent = health.components.find((c) => c.name === 'database');
- expect(dbComponent).toBeDefined();
- // Status can be degraded (file not found) or healthy (exists)
- expect(dbComponent!.status).toMatch(/healthy|degraded/);
- });
-
- it('should handle memory store creation errors', async () => {
- // Test with invalid path should be handled gracefully
- try {
- // This might fail, but should not crash
- const store = await UnifiedMemoryStore.create('/invalid/path/test.db');
- store.close();
- } catch (error) {
- // Error is expected and should be caught
- expect(error).toBeDefined();
- }
- });
-
- it('should continue on non-critical errors', async () => {
- // Dashboard should continue even if some metrics fail to collect
- const checker = new HealthChecker({ timeout: 5000 });
- const health = await checker.checkAll();
-
- // Should return result even if some checks are 'unknown'
- expect(health).toBeDefined();
- expect(health.components).toBeInstanceOf(Array);
- });
-});
diff --git a/src/cli/__tests__/login.test.ts b/src/cli/__tests__/login.test.ts
deleted file mode 100644
index 098cfab0..00000000
--- a/src/cli/__tests__/login.test.ts
+++ /dev/null
@@ -1,209 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-
-// Mock credentials module
-vi.mock('../credentials.js', () => ({
- loadCredentials: vi.fn(),
- saveCredentials: vi.fn(),
- getCredentialsPath: vi.fn(() => '/tmp/test-memesh/credentials.json'),
-}));
-
-// Mock logger
-vi.mock('../../utils/logger.js', () => ({
- logger: { debug: vi.fn(), error: vi.fn(), info: vi.fn(), warn: vi.fn() },
-}));
-
-import { loadCredentials, saveCredentials } from '../credentials.js';
-
-describe('login module', () => {
- const mockLoadCredentials = vi.mocked(loadCredentials);
- const mockSaveCredentials = vi.mocked(saveCredentials);
-
- beforeEach(() => {
- vi.resetAllMocks();
- });
-
- describe('DeviceAuthResponse interface', () => {
- it('should have correct shape', () => {
- const response = {
- device_code: 'test-device-code',
- user_code: 'ABCD-1234',
- verification_uri: 'https://memesh.ai/auth/device',
- verification_uri_complete: 'https://memesh.ai/auth/device?user_code=ABCD-1234',
- expires_in: 600,
- interval: 5,
- };
-
- expect(response.device_code).toBeDefined();
- expect(response.user_code).toMatch(/^[A-Z0-9]{4}-[A-Z0-9]{4}$/);
- expect(response.verification_uri).toContain('https://');
- expect(response.expires_in).toBeGreaterThan(0);
- expect(response.interval).toBeGreaterThan(0);
- });
- });
-
- describe('TokenResponse interface', () => {
- it('should have correct shape', () => {
- const response = {
- api_key: 'sk_memmesh_abc123def456',
- token_type: 'api_key',
- };
-
- expect(response.api_key).toMatch(/^sk_memmesh_/);
- expect(response.token_type).toBe('api_key');
- });
- });
-
- describe('openBrowser URL validation', () => {
- // Test the URL validation logic directly
- it('should accept https URLs', () => {
- const url = 'https://memesh.ai/auth/device?user_code=ABCD-1234';
- const parsed = new URL(url);
- expect(parsed.protocol).toBe('https:');
- });
-
- it('should accept http URLs', () => {
- const url = 'http://localhost:3000/auth/device';
- const parsed = new URL(url);
- expect(parsed.protocol).toBe('http:');
- });
-
- it('should reject non-http protocols', () => {
- const url = 'javascript:alert(1)';
- const parsed = new URL(url);
- expect(parsed.protocol).not.toBe('https:');
- expect(parsed.protocol).not.toBe('http:');
- });
-
- it('should reject invalid URLs', () => {
- expect(() => new URL('not-a-url')).toThrow();
- });
- });
-
- describe('manualKeyLogin via secure stdin', () => {
- it('should validate key format prefix', () => {
- const validKey = 'sk_memmesh_abc123def456';
- expect(validKey.startsWith('sk_memmesh_')).toBe(true);
-
- const invalidKey = 'sk_live_abc123';
- expect(invalidKey.startsWith('sk_memmesh_')).toBe(false);
- });
-
- it('should export readApiKeyFromStdin function that returns a promise', async () => {
- const { readApiKeyFromStdin } = await import('../login.js');
- expect(readApiKeyFromStdin).toBeDefined();
- expect(typeof readApiKeyFromStdin).toBe('function');
- // The function should return a Promise (thenable)
- // We don't call it here since it requires interactive stdin
- });
-
- it('should not have --api-key option (security: prevents ps aux exposure)', async () => {
- const { Command } = await import('commander');
- const { registerLoginCommand } = await import('../login.js');
- const testProgram = new Command();
- registerLoginCommand(testProgram);
- const loginCmd = testProgram.commands.find(c => c.name() === 'login');
- expect(loginCmd).toBeDefined();
- // Verify --api-key option does NOT exist
- const apiKeyOpt = loginCmd!.options.find(o => o.long === '--api-key');
- expect(apiKeyOpt).toBeUndefined();
- });
-
- it('should have --manual flag option for secure stdin input', async () => {
- const { Command } = await import('commander');
- const { registerLoginCommand } = await import('../login.js');
- const testProgram = new Command();
- registerLoginCommand(testProgram);
- const loginCmd = testProgram.commands.find(c => c.name() === 'login');
- expect(loginCmd).toBeDefined();
- // Verify --manual option exists and is a boolean flag (no argument)
- const manualOpt = loginCmd!.options.find(o => o.long === '--manual');
- expect(manualOpt).toBeDefined();
- expect(manualOpt!.required).toBeFalsy();
- });
- });
-
- describe('deviceFlowLogin', () => {
- it('should detect existing credentials and skip login', async () => {
- mockLoadCredentials.mockReturnValue({
- apiKey: 'sk_memmesh_existing',
- createdAt: '2026-01-01T00:00:00Z',
- });
-
- // The actual function calls process.exit, so we test the logic flow
- const existing = loadCredentials();
- expect(existing).not.toBeNull();
- expect(existing!.apiKey).toBe('sk_memmesh_existing');
- });
-
- it('should proceed when no existing credentials', () => {
- mockLoadCredentials.mockReturnValue(null);
- expect(loadCredentials()).toBeNull();
- });
- });
-
- describe('polling error handling', () => {
- it('should handle authorization_pending', () => {
- const error = { error: 'authorization_pending', error_description: 'Waiting' };
- expect(error.error).toBe('authorization_pending');
- });
-
- it('should handle slow_down by increasing interval', () => {
- let interval = 5000;
- const error = { error: 'slow_down' };
- if (error.error === 'slow_down') {
- interval += 5000;
- }
- expect(interval).toBe(10000);
- });
-
- it('should handle access_denied', () => {
- const error = { error: 'access_denied' };
- expect(error.error).toBe('access_denied');
- });
-
- it('should handle expired_token', () => {
- const error = { error: 'expired_token' };
- expect(error.error).toBe('expired_token');
- });
- });
-
- describe('credential saving on success', () => {
- it('should save credentials with default backend URL', () => {
- const tokenData = { api_key: 'sk_memmesh_newkey', token_type: 'api_key' };
- const backendUrl = 'https://api.memesh.ai';
- const DEFAULT_BACKEND_URL = 'https://api.memesh.ai';
-
- saveCredentials({
- apiKey: tokenData.api_key,
- baseUrl: backendUrl !== DEFAULT_BACKEND_URL ? backendUrl : undefined,
- createdAt: new Date().toISOString(),
- });
-
- expect(mockSaveCredentials).toHaveBeenCalledWith(
- expect.objectContaining({
- apiKey: 'sk_memmesh_newkey',
- baseUrl: undefined,
- }),
- );
- });
-
- it('should save credentials with custom backend URL', () => {
- const tokenData = { api_key: 'sk_memmesh_newkey', token_type: 'api_key' };
- const backendUrl = 'https://custom-backend.example.com';
- const DEFAULT_BACKEND_URL = 'https://api.memesh.ai';
-
- saveCredentials({
- apiKey: tokenData.api_key,
- baseUrl: backendUrl !== DEFAULT_BACKEND_URL ? backendUrl : undefined,
- createdAt: new Date().toISOString(),
- });
-
- expect(mockSaveCredentials).toHaveBeenCalledWith(
- expect.objectContaining({
- apiKey: 'sk_memmesh_newkey',
- baseUrl: 'https://custom-backend.example.com',
- }),
- );
- });
- });
-});
diff --git a/src/cli/__tests__/logout.test.ts b/src/cli/__tests__/logout.test.ts
deleted file mode 100644
index 0736e2fe..00000000
--- a/src/cli/__tests__/logout.test.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-import { describe, it, expect, vi, beforeEach } from 'vitest';
-
-// Mock credentials module
-vi.mock('../credentials.js', () => ({
- loadCredentials: vi.fn(),
- deleteCredentials: vi.fn(),
-}));
-
-import { loadCredentials, deleteCredentials } from '../credentials.js';
-
-const mockLoadCredentials = vi.mocked(loadCredentials);
-const mockDeleteCredentials = vi.mocked(deleteCredentials);
-
-describe('logout logic', () => {
- beforeEach(() => {
- vi.resetAllMocks();
- });
-
- it('should detect when not logged in', () => {
- mockLoadCredentials.mockReturnValue(null);
- const existing = loadCredentials();
- expect(existing).toBeNull();
- });
-
- it('should delete credentials when logged in', () => {
- mockLoadCredentials.mockReturnValue({
- apiKey: 'sk_memmesh_todelete',
- createdAt: '2026-01-01T00:00:00Z',
- });
- mockDeleteCredentials.mockReturnValue(true);
-
- const existing = loadCredentials();
- expect(existing).not.toBeNull();
-
- const result = deleteCredentials();
- expect(result).toBe(true);
- expect(mockDeleteCredentials).toHaveBeenCalledOnce();
- });
-
- it('should handle delete failure', () => {
- mockLoadCredentials.mockReturnValue({
- apiKey: 'sk_memmesh_todelete',
- createdAt: '2026-01-01T00:00:00Z',
- });
- mockDeleteCredentials.mockReturnValue(false);
-
- const result = deleteCredentials();
- expect(result).toBe(false);
- });
-});
diff --git a/src/cli/__tests__/stats.test.ts b/src/cli/__tests__/stats.test.ts
deleted file mode 100644
index ca9074b4..00000000
--- a/src/cli/__tests__/stats.test.ts
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Stats Command Tests
- */
-
-import { describe, it, expect, beforeAll } from 'vitest';
-import { StatsCommand } from '../stats.js';
-import { KnowledgeGraph } from '../../knowledge-graph/index.js';
-import { tmpdir } from 'os';
-import { join } from 'path';
-import { mkdirSync } from 'fs';
-
-describe('StatsCommand', () => {
- let testDir: string;
- let kg: KnowledgeGraph;
-
- beforeAll(async () => {
- // Create temporary directory for test database
- testDir = join(tmpdir(), `memesh-stats-test-${Date.now()}`);
- mkdirSync(testDir, { recursive: true });
-
- // Create test database with sample data
- const dbPath = join(testDir, 'test-kg.db');
- kg = await KnowledgeGraph.create(dbPath);
-
- // Add sample entities
- await kg.createEntity({
- name: 'Test Decision 1',
- entityType: 'decision',
- observations: ['This is a test decision'],
- tags: ['test', 'sample'],
- });
-
- await kg.createEntity({
- name: 'Test Bug Fix 1',
- entityType: 'bug_fix',
- observations: ['Fixed a critical bug'],
- tags: ['bug', 'critical'],
- });
-
- await kg.createEntity({
- name: 'Test Feature 1',
- entityType: 'feature',
- observations: ['Implemented new feature'],
- tags: ['feature', 'enhancement'],
- });
- });
-
- it('should create StatsCommand instance', async () => {
- const stats = await StatsCommand.create();
- expect(stats).toBeDefined();
- });
-
- it('should run stats without errors (default options)', async () => {
- const stats = await StatsCommand.create();
- await expect(stats.run()).resolves.not.toThrow();
- });
-
- it('should run stats with time range filter', async () => {
- const stats = await StatsCommand.create();
- await expect(stats.run({ range: 'week' })).resolves.not.toThrow();
- });
-
- it('should run stats with verbose mode', async () => {
- const stats = await StatsCommand.create();
- await expect(stats.run({ verbose: true })).resolves.not.toThrow();
- });
-
- it('should export as JSON', async () => {
- const stats = await StatsCommand.create();
- await expect(stats.run({ export: 'json' })).resolves.not.toThrow();
- });
-
- it('should export as CSV', async () => {
- const stats = await StatsCommand.create();
- await expect(stats.run({ export: 'csv' })).resolves.not.toThrow();
- });
-});
diff --git a/src/cli/config.ts b/src/cli/config.ts
deleted file mode 100644
index 440c55d5..00000000
--- a/src/cli/config.ts
+++ /dev/null
@@ -1,663 +0,0 @@
-/**
- * Configuration Management for MeMesh
- *
- * Provides utilities for viewing, validating, editing, and resetting MeMesh configuration.
- *
- * Features:
- * - Show current configuration with syntax highlighting
- * - Validate MCP setup and test connection
- * - Open config in default editor
- * - Reset to default configuration (with confirmation)
- * - Platform-specific config path detection
- */
-
-import chalk from 'chalk';
-import fs from 'fs-extra';
-import path from 'path';
-import os from 'os';
-import inquirer from 'inquirer';
-import { spawn } from 'child_process';
-import { logger } from '../utils/logger.js';
-import { ProgressIndicator } from '../ui/ProgressIndicator.js';
-import boxen from 'boxen';
-
-/**
- * MCP Server Configuration
- */
-interface MCPServerConfig {
- command: string;
- args: string[];
- env?: Record;
-}
-
-/**
- * Claude Code Configuration
- */
-interface ClaudeCodeConfig {
- mcpServers?: {
- [key: string]: MCPServerConfig;
- };
- [key: string]: any;
-}
-
-/**
- * Configuration validation result
- */
-interface ValidationResult {
- valid: boolean;
- errors: string[];
- warnings: string[];
- config?: ClaudeCodeConfig;
-}
-
-/**
- * Configuration utility class
- */
-export class ConfigManager {
- /**
- * Get Claude Code config file path for current platform
- */
- static getConfigPath(): string {
- const platform = os.platform();
- const homeDir = os.homedir();
-
- if (platform === 'darwin') {
- // macOS: ~/Library/Application Support/Claude/claude_desktop_config.json
- return path.join(
- homeDir,
- 'Library/Application Support/Claude/claude_desktop_config.json'
- );
- } else if (platform === 'win32') {
- // Windows: %APPDATA%\Claude\claude_desktop_config.json
- return path.join(process.env.APPDATA || '', 'Claude/claude_desktop_config.json');
- } else {
- // Linux: ~/.config/Claude/claude_desktop_config.json
- return path.join(homeDir, '.config/Claude/claude_desktop_config.json');
- }
- }
-
- /**
- * Get platform-specific config directory description
- */
- static getConfigPathDescription(): string {
- const platform = os.platform();
-
- if (platform === 'darwin') {
- return '~/Library/Application Support/Claude/claude_desktop_config.json';
- } else if (platform === 'win32') {
- return '%APPDATA%\\Claude\\claude_desktop_config.json';
- } else {
- return '~/.config/Claude/claude_desktop_config.json';
- }
- }
-
- /**
- * Read and parse Claude Code configuration
- */
- static async readConfig(): Promise {
- const configPath = this.getConfigPath();
-
- try {
- if (!(await fs.pathExists(configPath))) {
- return null;
- }
-
- const content = await fs.readFile(configPath, 'utf-8');
- return JSON.parse(content);
- } catch (error) {
- logger.error('Failed to read config', { error });
- return null;
- }
- }
-
- /**
- * Write Claude Code configuration
- */
- static async writeConfig(config: ClaudeCodeConfig): Promise {
- const configPath = this.getConfigPath();
-
- try {
- // Ensure directory exists
- await fs.ensureDir(path.dirname(configPath));
-
- // Write config
- await fs.writeJSON(configPath, config, { spaces: 2 });
-
- return true;
- } catch (error) {
- logger.error('Failed to write config', { error });
- return false;
- }
- }
-
- /**
- * Validate configuration
- */
- static async validateConfig(): Promise {
- const result: ValidationResult = {
- valid: true,
- errors: [],
- warnings: [],
- };
-
- const configPath = this.getConfigPath();
-
- // Check if config file exists
- if (!(await fs.pathExists(configPath))) {
- result.valid = false;
- result.errors.push('Configuration file does not exist');
- return result;
- }
-
- // Try to parse JSON
- let config: ClaudeCodeConfig;
- try {
- const content = await fs.readFile(configPath, 'utf-8');
- config = JSON.parse(content);
- result.config = config;
- } catch (error) {
- result.valid = false;
- result.errors.push(
- `Invalid JSON syntax: ${error instanceof Error ? error.message : String(error)}`
- );
- return result;
- }
-
- // Validate structure
- if (!config.mcpServers) {
- result.warnings.push('No MCP servers configured');
- }
-
- // Check for memesh server
- if (!config.mcpServers?.memesh) {
- result.valid = false;
- result.errors.push('MeMesh MCP server not configured');
- return result;
- }
-
- // Validate memesh server config
- const memeshConfig = config.mcpServers.memesh;
-
- if (!memeshConfig.command) {
- result.errors.push('MeMesh server: missing "command" field');
- result.valid = false;
- }
-
- if (!memeshConfig.args || !Array.isArray(memeshConfig.args)) {
- result.errors.push('MeMesh server: missing or invalid "args" field');
- result.valid = false;
- }
-
- // Check if memesh executable exists
- if (memeshConfig.args && memeshConfig.args.length > 0) {
- const executablePath = memeshConfig.args[0];
- if (!(await fs.pathExists(executablePath))) {
- result.warnings.push(
- `MeMesh executable not found: ${executablePath}`
- );
- }
- }
-
- return result;
- }
-
- /**
- * Generate default configuration
- */
- static generateDefaultConfig(): ClaudeCodeConfig {
- // Try to find global memesh installation
- const memeshPath = this.findMemeshPath();
-
- return {
- mcpServers: {
- memesh: {
- command: 'node',
- args: [memeshPath],
- env: {},
- },
- },
- };
- }
-
- /**
- * Find memesh installation path
- */
- private static findMemeshPath(): string {
- const platform = os.platform();
-
- // Try global node_modules
- let globalNodeModules: string;
- if (platform === 'win32') {
- globalNodeModules = path.join(process.env.APPDATA || '', 'npm/node_modules');
- } else {
- globalNodeModules = '/usr/local/lib/node_modules';
- }
-
- const globalMemeshPath = path.join(
- globalNodeModules,
- '@pcircle/memesh/dist/mcp/server-bootstrap.js'
- );
-
- // Return the path (may not exist yet, setup wizard will handle)
- return globalMemeshPath;
- }
-
- /**
- * Highlight JSON with colors
- */
- static highlightJSON(obj: any): string {
- const json = JSON.stringify(obj, null, 2);
-
- return json
- .split('\n')
- .map((line) => {
- // Highlight keys
- line = line.replace(/"([^"]+)":/g, (match, key) => {
- return `${chalk.cyan(`"${key}"`)}:`;
- });
-
- // Highlight string values
- line = line.replace(/: "([^"]*)"/g, (match, value) => {
- return `: ${chalk.green(`"${value}"`)}`;
- });
-
- // Highlight booleans
- line = line.replace(/\b(true|false)\b/g, (match) => {
- return chalk.yellow(match);
- });
-
- // Highlight null
- line = line.replace(/\bnull\b/g, chalk.gray('null'));
-
- // Highlight braces and brackets
- line = line.replace(/[{}[\]]/g, (match) => chalk.dim(match));
-
- return line;
- })
- .join('\n');
- }
-
- /**
- * Open config file in default editor
- */
- static async openInEditor(): Promise {
- const configPath = this.getConfigPath();
-
- // Ensure config exists
- if (!(await fs.pathExists(configPath))) {
- console.log(
- chalk.yellow('⚠️ Configuration file does not exist. Run "memesh setup" first.')
- );
- return false;
- }
-
- const editor = process.env.EDITOR || process.env.VISUAL || this.getDefaultEditor();
-
- return new Promise((resolve) => {
- const child = spawn(editor, [configPath], {
- stdio: 'inherit',
- shell: true,
- });
-
- child.on('exit', (code) => {
- resolve(code === 0);
- });
-
- child.on('error', (error) => {
- logger.error('Failed to open editor', { error });
- console.error(chalk.red(`Failed to open editor: ${error.message}`));
- resolve(false);
- });
- });
- }
-
- /**
- * Get default editor for current platform
- */
- private static getDefaultEditor(): string {
- const platform = os.platform();
-
- if (platform === 'darwin') {
- return 'open -e'; // TextEdit on macOS
- } else if (platform === 'win32') {
- return 'notepad';
- } else {
- return 'nano'; // Fallback to nano on Linux
- }
- }
-
- /**
- * Create backup of config file
- */
- static async backupConfig(): Promise {
- const configPath = this.getConfigPath();
-
- if (!(await fs.pathExists(configPath))) {
- return null;
- }
-
- const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
- const backupPath = `${configPath}.backup-${timestamp}`;
-
- try {
- await fs.copy(configPath, backupPath);
- return backupPath;
- } catch (error) {
- logger.error('Failed to backup config', { error });
- return null;
- }
- }
-}
-
-/**
- * Show current configuration
- */
-export async function showConfig(): Promise {
- const configPath = ConfigManager.getConfigPath();
- const pathDescription = ConfigManager.getConfigPathDescription();
-
- console.log(
- boxen(chalk.bold.cyan('⚙️ MeMesh Configuration'), {
- padding: 1,
- borderColor: 'cyan',
- borderStyle: 'round',
- })
- );
-
- // Show config file location
- console.log(chalk.bold('\n📍 Configuration File:'));
- console.log(chalk.dim(` ${pathDescription}`));
- console.log(chalk.gray(` ${configPath}\n`));
-
- // Check if config exists
- if (!(await fs.pathExists(configPath))) {
- console.log(
- chalk.yellow(
- '⚠️ Configuration file not found. Run "memesh setup" to configure.'
- )
- );
- return;
- }
-
- // Read and display config
- const config = await ConfigManager.readConfig();
-
- if (!config) {
- console.log(chalk.red('❌ Failed to read configuration file'));
- return;
- }
-
- console.log(chalk.bold('📄 Configuration:\n'));
- console.log(ConfigManager.highlightJSON(config));
-
- // Show memesh-specific config
- if (config.mcpServers?.memesh) {
- const memeshConfig = config.mcpServers.memesh;
-
- console.log(chalk.bold('\n🔧 MeMesh Server:'));
- console.log(chalk.dim(' Command: ') + chalk.cyan(memeshConfig.command));
- console.log(
- chalk.dim(' Script: ') + chalk.cyan(memeshConfig.args.join(' '))
- );
-
- // Check if executable exists
- if (memeshConfig.args.length > 0) {
- const executablePath = memeshConfig.args[0];
- const exists = await fs.pathExists(executablePath);
-
- if (exists) {
- console.log(chalk.dim(' Status: ') + chalk.green('✓ Installed'));
- } else {
- console.log(
- chalk.dim(' Status: ') +
- chalk.yellow('⚠ Executable not found')
- );
- }
- }
- }
-
- console.log();
-}
-
-/**
- * Validate configuration and test connection
- */
-export async function validateConfig(): Promise {
- const configPath = ConfigManager.getConfigPath();
-
- console.log(
- boxen(chalk.bold.cyan('🔍 Configuration Validation'), {
- padding: 1,
- borderColor: 'cyan',
- borderStyle: 'round',
- })
- );
-
- console.log(chalk.bold('\n📍 Configuration File:'));
- console.log(chalk.gray(` ${configPath}\n`));
-
- const spinner = ProgressIndicator.simple('Validating configuration...');
-
- // Run validation
- const result = await ConfigManager.validateConfig();
-
- spinner.stop();
-
- // Display results
- if (result.errors.length === 0) {
- console.log(chalk.green('\n✅ Configuration is valid!\n'));
- } else {
- console.log(chalk.red('\n❌ Configuration has errors:\n'));
- result.errors.forEach((error) => {
- console.log(chalk.red(` • ${error}`));
- });
- console.log();
- }
-
- // Display warnings
- if (result.warnings.length > 0) {
- console.log(chalk.yellow('⚠️ Warnings:\n'));
- result.warnings.forEach((warning) => {
- console.log(chalk.yellow(` • ${warning}`));
- });
- console.log();
- }
-
- // Show MeMesh server details
- if (result.config?.mcpServers?.memesh) {
- const memeshConfig = result.config.mcpServers.memesh;
-
- console.log(chalk.bold('🔧 MeMesh Server Configuration:'));
- console.log(chalk.dim(' Command: ') + chalk.cyan(memeshConfig.command));
- console.log(
- chalk.dim(' Script: ') + chalk.cyan(memeshConfig.args.join(' '))
- );
- console.log();
- }
-
- // Connection test note
- if (result.valid) {
- console.log(
- boxen(
- chalk.bold('✓ Next Steps:\n\n') +
- chalk.dim('1. Restart Claude Code to load MeMesh\n') +
- chalk.dim('2. Verify connection: type "buddy-help" in Claude Code\n') +
- chalk.dim('3. Test features: try "buddy-do" or "buddy-remember"'),
- {
- padding: 1,
- borderColor: 'green',
- borderStyle: 'round',
- }
- )
- );
- } else {
- console.log(
- boxen(
- chalk.bold('🔧 Fix Configuration:\n\n') +
- chalk.dim('Run: ') +
- chalk.cyan('memesh setup') +
- chalk.dim(' to reconfigure\n') +
- chalk.dim('Or: ') +
- chalk.cyan('memesh config edit') +
- chalk.dim(' to edit manually'),
- {
- padding: 1,
- borderColor: 'red',
- borderStyle: 'round',
- }
- )
- );
- }
-
- console.log();
-}
-
-/**
- * Edit configuration in default editor
- */
-export async function editConfig(): Promise {
- const configPath = ConfigManager.getConfigPath();
-
- console.log(
- boxen(chalk.bold.cyan('✏️ Edit Configuration'), {
- padding: 1,
- borderColor: 'cyan',
- borderStyle: 'round',
- })
- );
-
- console.log(chalk.bold('\n📍 Configuration File:'));
- console.log(chalk.gray(` ${configPath}\n`));
-
- // Check if config exists
- if (!(await fs.pathExists(configPath))) {
- console.log(
- chalk.yellow(
- '⚠️ Configuration file does not exist. Run "memesh setup" first.\n'
- )
- );
- return;
- }
-
- // Create backup before editing
- const backupPath = await ConfigManager.backupConfig();
- if (backupPath) {
- console.log(chalk.dim(`📦 Backup created: ${path.basename(backupPath)}\n`));
- }
-
- // Open in editor
- console.log(chalk.dim('Opening configuration in editor...\n'));
-
- const success = await ConfigManager.openInEditor();
-
- if (success) {
- console.log(chalk.green('\n✅ Configuration saved'));
-
- // Validate after editing
- console.log(chalk.dim('\nValidating configuration...\n'));
-
- const result = await ConfigManager.validateConfig();
-
- if (result.valid) {
- console.log(chalk.green('✅ Configuration is valid'));
- } else {
- console.log(chalk.red('❌ Configuration has errors:'));
- result.errors.forEach((error) => {
- console.log(chalk.red(` • ${error}`));
- });
-
- if (backupPath) {
- console.log(
- chalk.yellow(
- `\n⚠️ You can restore from backup: ${path.basename(backupPath)}`
- )
- );
- }
- }
- } else {
- console.log(chalk.yellow('\n⚠️ Editor closed without saving'));
- }
-
- console.log();
-}
-
-/**
- * Reset configuration to defaults
- */
-export async function resetConfig(): Promise {
- const configPath = ConfigManager.getConfigPath();
-
- console.log(
- boxen(chalk.bold.red('🔄 Reset Configuration'), {
- padding: 1,
- borderColor: 'red',
- borderStyle: 'round',
- })
- );
-
- console.log(chalk.bold('\n📍 Configuration File:'));
- console.log(chalk.gray(` ${configPath}\n`));
-
- // Confirm reset
- const { confirmReset } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'confirmReset',
- message: chalk.yellow(
- 'Are you sure you want to reset configuration to defaults?'
- ),
- default: false,
- },
- ]);
-
- if (!confirmReset) {
- console.log(chalk.dim('\nReset cancelled.\n'));
- return;
- }
-
- // Create backup
- const backupPath = await ConfigManager.backupConfig();
- if (backupPath) {
- console.log(chalk.green(`\n✅ Backup created: ${path.basename(backupPath)}`));
- }
-
- // Generate default config
- const spinner = ProgressIndicator.simple('Resetting configuration...');
-
- const defaultConfig = ConfigManager.generateDefaultConfig();
- const success = await ConfigManager.writeConfig(defaultConfig);
-
- spinner.stop();
-
- if (success) {
- console.log(chalk.green('\n✅ Configuration reset to defaults\n'));
-
- // Show new config
- console.log(chalk.bold('📄 New Configuration:\n'));
- console.log(ConfigManager.highlightJSON(defaultConfig));
-
- console.log(
- boxen(
- chalk.bold('✓ Next Steps:\n\n') +
- chalk.dim('1. Restart Claude Code to apply changes\n') +
- chalk.dim('2. Verify: type "buddy-help" in Claude Code'),
- {
- padding: 1,
- borderColor: 'green',
- borderStyle: 'round',
- margin: { top: 1 },
- }
- )
- );
- } else {
- console.log(chalk.red('\n❌ Failed to reset configuration'));
-
- if (backupPath) {
- console.log(
- chalk.yellow(`\n⚠️ You can restore from backup: ${path.basename(backupPath)}`)
- );
- }
- }
-
- console.log();
-}
diff --git a/src/cli/credentials.ts b/src/cli/credentials.ts
deleted file mode 100644
index 403b6fb0..00000000
--- a/src/cli/credentials.ts
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * MeMesh Credentials Storage
- *
- * Manages persistent API key credentials for MeMesh Cloud.
- * Stored at ~/.config/memesh/credentials.json with 0o600 permissions.
- */
-
-import * as fs from 'fs';
-import * as path from 'path';
-import * as os from 'os';
-
-export interface MeMeshCredentials {
- apiKey: string;
- email?: string;
- userId?: string;
- baseUrl?: string;
- createdAt: string;
-}
-
-/**
- * Get XDG-compliant credentials file path
- * ~/.config/memesh/credentials.json
- */
-export function getCredentialsPath(): string {
- const configDir = process.env.XDG_CONFIG_HOME || path.join(os.homedir(), '.config');
- return path.join(configDir, 'memesh', 'credentials.json');
-}
-
-/**
- * Load credentials from disk
- * Returns null if file doesn't exist or is invalid
- */
-export function loadCredentials(): MeMeshCredentials | null {
- const credPath = getCredentialsPath();
- try {
- if (!fs.existsSync(credPath)) return null;
- const content = fs.readFileSync(credPath, 'utf-8');
- const creds = JSON.parse(content);
- if (!creds || typeof creds.apiKey !== 'string' || !creds.apiKey) {
- return null;
- }
- return creds as MeMeshCredentials;
- } catch {
- return null;
- }
-}
-
-/**
- * Save credentials to disk with secure permissions (0o600)
- */
-export function saveCredentials(creds: MeMeshCredentials): void {
- const credPath = getCredentialsPath();
- const dir = path.dirname(credPath);
-
- // Create directory (idempotent with recursive: true, avoids TOCTOU race)
- fs.mkdirSync(dir, { recursive: true, mode: 0o700 });
-
- // Write with owner-only permissions
- fs.writeFileSync(credPath, JSON.stringify(creds, null, 2), { mode: 0o600 });
- // Enforce permissions even on pre-existing files (writeFileSync mode only applies to new files)
- fs.chmodSync(credPath, 0o600);
-}
-
-/**
- * Delete credentials file
- */
-export function deleteCredentials(): boolean {
- const credPath = getCredentialsPath();
- try {
- if (fs.existsSync(credPath)) {
- fs.unlinkSync(credPath);
- return true;
- }
- return false;
- } catch {
- return false;
- }
-}
diff --git a/src/cli/daemon.ts b/src/cli/daemon.ts
deleted file mode 100644
index d3557b6f..00000000
--- a/src/cli/daemon.ts
+++ /dev/null
@@ -1,567 +0,0 @@
-/**
- * Daemon CLI Commands
- *
- * Commands for managing the MeMesh daemon:
- * - memesh daemon status Show daemon status
- * - memesh daemon stop Stop daemon (graceful)
- * - memesh daemon stop -f Force stop daemon
- * - memesh daemon restart Restart daemon
- * - memesh daemon logs View daemon logs
- * - memesh daemon info Detailed diagnostics
- */
-
-import { Command } from 'commander';
-import chalk from 'chalk';
-import fs from 'fs';
-import path from 'path';
-import { spawn, execFile } from 'child_process';
-import { promisify } from 'util';
-import { DaemonLockManager } from '../mcp/daemon/DaemonLockManager.js';
-import { IpcTransport } from '../mcp/daemon/IpcTransport.js';
-import { createShutdown, serializeMessage } from '../mcp/daemon/DaemonProtocol.js';
-import { getDataDirectory } from '../utils/PathResolver.js';
-import { logger } from '../utils/logger.js';
-
-const execFileAsync = promisify(execFile);
-
-// ═══════════════════════════════════════════════════════════
-// Constants
-// ═══════════════════════════════════════════════════════════
-const DEFAULT_LOG_LINES = 50;
-const SHUTDOWN_TIMEOUT_MS = 10000;
-const PID_CHECK_INTERVAL_MS = 100;
-const CONNECTION_TIMEOUT_MS = 5000;
-const STATUS_CHECK_TIMEOUT_MS = 2000;
-const MIN_LOG_LINES = 1;
-const MAX_LOG_LINES = 10000;
-
-/**
- * Format error for structured logging
- */
-function formatError(error: unknown): { message: string; stack?: string } {
- return {
- message: error instanceof Error ? error.message : String(error),
- stack: error instanceof Error ? error.stack : undefined,
- };
-}
-
-/**
- * Validate that a path is within the expected data directory (security check)
- * Prevents path traversal attacks
- * @returns true if path is within the expected directory
- */
-function isPathWithinDataDir(targetPath: string): boolean {
- const dataDir = getDataDirectory();
- const normalizedTarget = path.resolve(targetPath);
- const normalizedDataDir = path.resolve(dataDir);
-
- // Ensure the target path starts with the data directory
- return normalizedTarget.startsWith(normalizedDataDir + path.sep) ||
- normalizedTarget === normalizedDataDir;
-}
-
-/**
- * Validate that a path exists and is a regular file
- * @returns true if valid, false otherwise
- */
-function validateLogPath(logPath: string): { valid: boolean; error?: string } {
- // Security check: ensure path is within data directory
- if (!isPathWithinDataDir(logPath)) {
- return { valid: false, error: 'Log path is outside the expected data directory' };
- }
-
- if (!fs.existsSync(logPath)) {
- return { valid: false, error: 'Log file does not exist' };
- }
-
- try {
- const stat = fs.statSync(logPath);
- if (!stat.isFile()) {
- return { valid: false, error: 'Log path is not a regular file' };
- }
-
- // Check read permission by attempting to open
- fs.accessSync(logPath, fs.constants.R_OK);
-
- return { valid: true };
- } catch (error) {
- if ((error as NodeJS.ErrnoException).code === 'EACCES') {
- return { valid: false, error: 'Permission denied reading log file' };
- }
- return { valid: false, error: `Cannot access log file: ${formatError(error).message}` };
- }
-}
-
-/**
- * Parse and validate the lines option
- * @returns validated number or null if invalid
- */
-function parseAndValidateLines(linesOption: string): number | null {
- const parsed = parseInt(linesOption, 10);
-
- if (isNaN(parsed)) {
- return null;
- }
-
- if (parsed < MIN_LOG_LINES || parsed > MAX_LOG_LINES) {
- return null;
- }
-
- return parsed;
-}
-
-/**
- * Format uptime in human-readable format
- */
-function formatUptime(ms: number): string {
- const seconds = Math.floor(ms / 1000);
- const minutes = Math.floor(seconds / 60);
- const hours = Math.floor(minutes / 60);
- const days = Math.floor(hours / 24);
-
- if (days > 0) return `${days}d ${hours % 24}h`;
- if (hours > 0) return `${hours}h ${minutes % 60}m`;
- if (minutes > 0) return `${minutes}m ${seconds % 60}s`;
- return `${seconds}s`;
-}
-
-/**
- * Wait for a process to exit
- */
-async function waitForPidExit(pid: number, timeout: number): Promise {
- const startTime = Date.now();
-
- while (Date.now() - startTime < timeout) {
- if (!DaemonLockManager.isPidAlive(pid)) {
- return true;
- }
- await new Promise((resolve) => setTimeout(resolve, PID_CHECK_INTERVAL_MS));
- }
-
- return false;
-}
-
-/**
- * Get recent error lines from log file using piped processes
- *
- * SECURITY: Uses separate spawn processes with pipes instead of shell command
- * interpolation to prevent shell injection attacks.
- *
- * @param logPath Path to the log file (must be validated before calling)
- * @param tailLines Number of lines to read from end of file
- * @param errorLines Number of error lines to return
- * @returns String containing recent error lines
- */
-async function getRecentLogErrors(
- logPath: string,
- tailLines: number,
- errorLines: number
-): Promise {
- return new Promise((resolve, reject) => {
- // Spawn tail process - reads last N lines from file
- // Arguments are passed as array, no shell interpolation
- const tail1 = spawn('tail', ['-n', String(tailLines), logPath]);
-
- // Spawn grep process - filters for error lines
- const grep = spawn('grep', ['-i', 'error']);
-
- // Spawn final tail process - limits to last N errors
- const tail2 = spawn('tail', ['-n', String(errorLines)]);
-
- // Pipe: tail1 -> grep -> tail2
- tail1.stdout.pipe(grep.stdin);
- grep.stdout.pipe(tail2.stdin);
-
- // Handle errors from any process in the pipeline
- tail1.on('error', (err) => reject(err));
- grep.on('error', (err) => reject(err));
- tail2.on('error', (err) => reject(err));
-
- // Handle tail1 close - propagate to grep
- tail1.on('close', (code) => {
- if (code !== 0 && code !== null) {
- // tail error (e.g., file not found)
- grep.stdin.end();
- }
- });
-
- // Handle grep close - propagate to tail2
- grep.on('close', (code) => {
- tail2.stdin.end();
- // grep exit code 1 means no matches (not an error)
- if (code !== 0 && code !== 1 && code !== null) {
- reject(new Error(`grep exited with code ${code}`));
- }
- });
-
- // Collect output from final tail
- let output = '';
- tail2.stdout.on('data', (data: Buffer) => {
- output += data.toString();
- });
-
- // Resolve when final process completes
- tail2.on('close', (code) => {
- if (code === 0 || code === null) {
- resolve(output);
- } else {
- reject(Object.assign(new Error(`tail exited with code ${code}`), { code }));
- }
- });
- });
-}
-
-/**
- * Create daemon command group
- */
-export function createDaemonCommand(): Command {
- const daemon = new Command('daemon')
- .description('Manage MeMesh daemon process');
-
- // ═══════════════════════════════════════════════════════════
- // daemon status
- // ═══════════════════════════════════════════════════════════
- daemon
- .command('status')
- .description('Show daemon status')
- .action(async () => {
- try {
- const lockInfo = await DaemonLockManager.readLock();
- const transport = new IpcTransport();
-
- console.log(chalk.bold('\n📊 MeMesh Daemon Status\n'));
- console.log('═'.repeat(50));
-
- if (!lockInfo) {
- console.log(chalk.yellow('Status: ') + chalk.red('Not Running'));
- console.log(chalk.dim('No daemon lock file found.'));
- console.log('═'.repeat(50));
- return;
- }
-
- // Check if PID is alive
- const isAlive = DaemonLockManager.isPidAlive(lockInfo.pid);
-
- // Check if socket is responding
- const isResponding = await transport.isRunning(STATUS_CHECK_TIMEOUT_MS);
-
- if (isAlive && isResponding) {
- console.log(chalk.yellow('Status: ') + chalk.green('Running ✓'));
- } else if (isAlive && !isResponding) {
- console.log(chalk.yellow('Status: ') + chalk.red('Not Responding ⚠'));
- } else {
- console.log(chalk.yellow('Status: ') + chalk.red('Stale (zombie lock)'));
- }
-
- console.log(chalk.yellow('PID: ') + lockInfo.pid);
- console.log(chalk.yellow('Version: ') + lockInfo.version);
- console.log(chalk.yellow('Started: ') + new Date(lockInfo.startTime).toLocaleString());
- console.log(chalk.yellow('Uptime: ') + formatUptime(Date.now() - lockInfo.startTime));
- console.log(chalk.yellow('Clients: ') + lockInfo.clientCount);
- console.log(chalk.yellow('Socket: ') + transport.getPath());
- console.log('═'.repeat(50));
- } catch (error) {
- logger.error('Daemon status failed', formatError(error));
- console.error(chalk.red('Failed to get daemon status:'), formatError(error).message);
- process.exit(1);
- }
- });
-
- // ═══════════════════════════════════════════════════════════
- // daemon stop
- // ═══════════════════════════════════════════════════════════
- daemon
- .command('stop')
- .description('Stop the daemon')
- .option('-f, --force', 'Force kill without graceful shutdown')
- .action(async (options) => {
- try {
- const lockInfo = await DaemonLockManager.readLock();
-
- if (!lockInfo) {
- console.log(chalk.yellow('Daemon is not running.'));
- return;
- }
-
- if (options.force) {
- // Force kill
- console.log(chalk.yellow(`Force killing daemon (PID: ${lockInfo.pid})...`));
-
- try {
- process.kill(lockInfo.pid, 'SIGKILL');
- await DaemonLockManager.forceClearLock();
- console.log(chalk.green('Daemon force killed.'));
- } catch (error: any) {
- if (error.code === 'ESRCH') {
- // Process already dead
- await DaemonLockManager.forceClearLock();
- console.log(chalk.green('Daemon was not running, lock cleared.'));
- } else {
- throw error;
- }
- }
- } else {
- // Graceful shutdown
- console.log(chalk.yellow('Requesting graceful shutdown...'));
-
- const transport = new IpcTransport();
-
- try {
- const socket = await transport.connect({ timeout: CONNECTION_TIMEOUT_MS });
-
- // Send shutdown message
- const shutdownMsg = createShutdown('user_requested', CONNECTION_TIMEOUT_MS);
- socket.write(serializeMessage(shutdownMsg));
-
- // Wait for socket to drain before ending
- await new Promise((resolve, reject) => {
- socket.once('error', reject);
- socket.once('close', resolve);
- // Use end() which waits for data to be flushed
- socket.end(() => {
- // Data flushed, socket will close
- });
- }).catch(() => {
- // Ignore socket errors during shutdown - daemon may close first
- });
-
- // Wait for shutdown
- const exited = await waitForPidExit(lockInfo.pid, SHUTDOWN_TIMEOUT_MS);
-
- if (exited) {
- console.log(chalk.green('Daemon stopped gracefully.'));
- } else {
- console.log(chalk.yellow('Daemon did not stop within timeout.'));
- console.log(chalk.dim('Use --force to force kill.'));
- }
- } catch (error) {
- console.log(chalk.red('Could not connect to daemon for graceful shutdown.'));
- console.log(chalk.dim('Use --force to force kill.'));
- }
- }
- } catch (error) {
- logger.error('Daemon stop failed', formatError(error));
- console.error(chalk.red('Failed to stop daemon:'), formatError(error).message);
- process.exit(1);
- }
- });
-
- // ═══════════════════════════════════════════════════════════
- // daemon restart
- // ═══════════════════════════════════════════════════════════
- daemon
- .command('restart')
- .description('Restart the daemon')
- .action(async () => {
- try {
- console.log(chalk.yellow('Restarting daemon...'));
-
- const lockInfo = await DaemonLockManager.readLock();
-
- if (lockInfo) {
- const transport = new IpcTransport();
-
- try {
- const socket = await transport.connect({ timeout: CONNECTION_TIMEOUT_MS });
- const shutdownMsg = createShutdown('user_requested', CONNECTION_TIMEOUT_MS);
- socket.write(serializeMessage(shutdownMsg));
- socket.end();
-
- // Wait for old daemon to exit
- await waitForPidExit(lockInfo.pid, SHUTDOWN_TIMEOUT_MS);
- console.log(chalk.green('Old daemon stopped.'));
- } catch {
- // Ignore connection errors - daemon might already be dead
- await DaemonLockManager.forceClearLock();
- }
- }
-
- console.log(chalk.cyan('\nNote: A new daemon will start automatically when'));
- console.log(chalk.cyan('Claude Code connects to MeMesh.\n'));
- console.log(chalk.green('Restart preparation complete.'));
- } catch (error) {
- logger.error('Daemon restart failed', formatError(error));
- console.error(chalk.red('Failed to restart daemon:'), formatError(error).message);
- process.exit(1);
- }
- });
-
- // ═══════════════════════════════════════════════════════════
- // daemon logs
- // ═══════════════════════════════════════════════════════════
- daemon
- .command('logs')
- .description('View daemon logs')
- .option('-n, --lines ', `Number of lines to show (${MIN_LOG_LINES}-${MAX_LOG_LINES})`, String(DEFAULT_LOG_LINES))
- .option('-f, --follow', 'Follow log output')
- .action(async (options) => {
- try {
- // Validate lines option
- const lines = parseAndValidateLines(options.lines);
- if (lines === null) {
- console.log(chalk.red(`Invalid --lines value: "${options.lines}"`));
- console.log(chalk.dim(`Must be a positive integer between ${MIN_LOG_LINES} and ${MAX_LOG_LINES}.`));
- process.exit(1);
- }
-
- const logPath = path.join(getDataDirectory(), 'logs', 'memesh.log');
-
- // Validate log path exists and is accessible
- const validation = validateLogPath(logPath);
- if (!validation.valid) {
- if (validation.error === 'Log file does not exist') {
- console.log(chalk.yellow('No daemon logs found.'));
- console.log(chalk.dim(`Expected location: ${logPath}`));
- } else {
- console.log(chalk.red(validation.error || 'Invalid log path'));
- }
- return;
- }
-
- // Convert lines to string for command args
- const linesArg = String(lines);
-
- if (options.follow) {
- // Follow mode using tail -f
- console.log(chalk.dim(`Following ${logPath} (Ctrl+C to stop)\n`));
-
- const tail = spawn('tail', ['-f', '-n', linesArg, logPath], {
- stdio: 'inherit',
- });
-
- process.on('SIGINT', () => {
- tail.kill();
- process.exit(0);
- });
-
- await new Promise(() => {}); // Wait forever
- } else {
- // Show last N lines using execFile (safer than exec)
- try {
- const { stdout } = await execFileAsync('tail', ['-n', linesArg, logPath]);
- if (stdout.trim() === '') {
- console.log(chalk.yellow('Log file is empty.'));
- } else {
- console.log(stdout);
- }
- } catch (error) {
- const errInfo = formatError(error);
- if (errInfo.message.includes('EACCES') || errInfo.message.includes('permission')) {
- console.log(chalk.red('Permission denied reading log file.'));
- } else {
- console.log(chalk.yellow('No logs available.'));
- }
- }
- }
- } catch (error) {
- logger.error('Daemon logs failed', formatError(error));
- console.error(chalk.red('Failed to view logs:'), formatError(error).message);
- process.exit(1);
- }
- });
-
- // ═══════════════════════════════════════════════════════════
- // daemon info
- // ═══════════════════════════════════════════════════════════
- daemon
- .command('info')
- .description('Show detailed diagnostic information')
- .action(async () => {
- try {
- const lockInfo = await DaemonLockManager.readLock();
- const transport = new IpcTransport();
- const dataDir = getDataDirectory();
-
- console.log(chalk.bold('\n🔍 MeMesh Daemon Diagnostics\n'));
- console.log('═'.repeat(60));
-
- // System information
- console.log(chalk.cyan('\n📌 System Information'));
- console.log(chalk.yellow(' Platform: ') + process.platform);
- console.log(chalk.yellow(' Node.js: ') + process.version);
- console.log(chalk.yellow(' Data Dir: ') + dataDir);
- console.log(chalk.yellow(' IPC Path: ') + transport.getPath());
- console.log(chalk.yellow(' IPC Type: ') + (transport.isWindows() ? 'Named Pipe' : 'Unix Socket'));
-
- // Lock file info
- console.log(chalk.cyan('\n📌 Lock File'));
- const lockPath = path.join(dataDir, 'daemon.lock');
-
- if (fs.existsSync(lockPath)) {
- console.log(chalk.yellow(' Path: ') + lockPath);
- console.log(chalk.yellow(' Content: '));
- console.log(chalk.dim(' ' + JSON.stringify(lockInfo, null, 2).replace(/\n/g, '\n ')));
- } else {
- console.log(chalk.dim(' No lock file found'));
- }
-
- // Connection test
- console.log(chalk.cyan('\n📌 Connection Test'));
- const latency = await transport.ping();
-
- if (latency !== null) {
- console.log(chalk.green(' ✓ Connection successful') + chalk.dim(` (${latency}ms)`));
- } else {
- console.log(chalk.red(' ✗ Connection failed'));
- }
-
- // Resource usage (Unix only)
- if (lockInfo && DaemonLockManager.isPidAlive(lockInfo.pid)) {
- console.log(chalk.cyan('\n📌 Resource Usage'));
-
- if (process.platform !== 'win32') {
- try {
- const { stdout } = await execFileAsync('ps', [
- '-p', String(lockInfo.pid),
- '-o', '%cpu,%mem,rss,vsz',
- ]);
- console.log(chalk.dim(stdout));
- } catch {
- console.log(chalk.dim(' Unable to get resource usage'));
- }
- } else {
- console.log(chalk.dim(' Resource usage not available on Windows'));
- }
- }
-
- // Recent errors - use streaming with piped processes for efficiency
- // SECURITY FIX: Avoid shell interpolation by using separate spawn processes with pipes
- console.log(chalk.cyan('\n📌 Recent Errors (last 5)'));
- const logPath = path.join(dataDir, 'logs', 'memesh.log');
-
- // Validate log path is within data directory (security check)
- if (!isPathWithinDataDir(logPath)) {
- console.log(chalk.red(' Invalid log path'));
- } else if (fs.existsSync(logPath)) {
- try {
- // SECURITY FIX: Use piped spawn processes instead of sh -c with string interpolation
- // This prevents shell injection even if logPath somehow bypasses validation
- const recentErrors = await getRecentLogErrors(logPath, 1000, 5);
-
- if (recentErrors.trim()) {
- console.log(chalk.dim(recentErrors.trim()));
- } else {
- console.log(chalk.green(' No recent errors'));
- }
- } catch (error: any) {
- // grep returns exit code 1 when no matches found - this is not an error
- if (error.code === 1 && !error.stderr) {
- console.log(chalk.green(' No recent errors'));
- } else {
- console.log(chalk.green(' No recent errors'));
- }
- }
- } else {
- console.log(chalk.dim(' No logs available'));
- }
-
- console.log('\n' + '═'.repeat(60));
- } catch (error) {
- logger.error('Daemon info failed', formatError(error));
- console.error(chalk.red('Failed to get diagnostics:'), formatError(error).message);
- process.exit(1);
- }
- });
-
- return daemon;
-}
diff --git a/src/cli/dashboard.ts b/src/cli/dashboard.ts
deleted file mode 100644
index 3170600c..00000000
--- a/src/cli/dashboard.ts
+++ /dev/null
@@ -1,481 +0,0 @@
-/**
- * MeMesh Dashboard - Real-time Session Health Monitoring
- *
- * Provides a beautiful terminal dashboard with:
- * - MCP server status
- * - Memory usage statistics
- * - Recent command history
- * - Error log summary
- * - Real-time updates
- * - Keyboard controls
- */
-
-import chalk from 'chalk';
-import boxen from 'boxen';
-import Table from 'cli-table3';
-import { HealthChecker, type SystemHealth, type ComponentHealth } from '../core/HealthCheck.js';
-import { UnifiedMemoryStore } from '../memory/UnifiedMemoryStore.js';
-import { logger } from '../utils/logger.js';
-import { existsSync, readFileSync, statSync } from 'fs';
-import path from 'path';
-import { createInterface } from 'readline';
-
-// ============================================================================
-// Types
-// ============================================================================
-
-interface DashboardMetrics {
- systemHealth: SystemHealth;
- memoryStats: {
- totalEntities: number;
- recentActivities: string[];
- storageSize: string;
- };
- errorSummary: {
- recent: string[];
- count: number;
- };
- uptime: number;
-}
-
-// ============================================================================
-// Constants
-// ============================================================================
-
-const REFRESH_INTERVAL = 5000; // 5 seconds
-const MAX_RECENT_ACTIVITIES = 10;
-const MAX_ERROR_LOGS = 5;
-const LOG_DIR = path.join(process.cwd(), 'logs');
-
-// ============================================================================
-// Dashboard Metrics Collection
-// ============================================================================
-
-/**
- * Collect all dashboard metrics
- */
-async function collectMetrics(): Promise {
- const startTime = Date.now();
-
- // Collect system health
- const healthChecker = new HealthChecker({ timeout: 3000 });
- const systemHealth = await healthChecker.checkAll();
-
- // Collect memory statistics
- const memoryStats = await collectMemoryStats();
-
- // Collect error logs
- const errorSummary = collectErrorLogs();
-
- // Calculate uptime
- const uptime = Math.floor(process.uptime());
-
- logger.debug('[Dashboard] Metrics collected', {
- duration: Date.now() - startTime,
- memoryStats,
- errorCount: errorSummary.count,
- });
-
- return {
- systemHealth,
- memoryStats,
- errorSummary,
- uptime,
- };
-}
-
-/**
- * Collect memory statistics from UnifiedMemoryStore
- */
-async function collectMemoryStats(): Promise {
- try {
- const store = await UnifiedMemoryStore.create();
-
- // Search for all memories (limit to recent)
- const recentMemories = await store.search('', { limit: MAX_RECENT_ACTIVITIES });
-
- const recentActivities = recentMemories.map(
- (m) => `[${m.type}] ${m.content.substring(0, 50)}${m.content.length > 50 ? '...' : ''}`
- );
-
- // Get database size
- const dbPath = path.join(process.cwd(), 'memesh.db');
- let storageSize = 'N/A';
- if (existsSync(dbPath)) {
- const stats = statSync(dbPath);
- const sizeKB = Math.round(stats.size / 1024);
- storageSize = sizeKB < 1024 ? `${sizeKB} KB` : `${(sizeKB / 1024).toFixed(2)} MB`;
- }
-
- store.close();
-
- return {
- totalEntities: recentMemories.length,
- recentActivities,
- storageSize,
- };
- } catch (error) {
- logger.error('[Dashboard] Failed to collect memory stats', { error });
- return {
- totalEntities: 0,
- recentActivities: ['Error loading memory data'],
- storageSize: 'N/A',
- };
- }
-}
-
-/**
- * Collect recent error logs
- */
-function collectErrorLogs(): DashboardMetrics['errorSummary'] {
- try {
- const errorLogPath = path.join(LOG_DIR, 'error.log');
-
- if (!existsSync(errorLogPath)) {
- return { recent: [], count: 0 };
- }
-
- const content = readFileSync(errorLogPath, 'utf-8');
- const lines = content.trim().split('\n').filter((l) => l.trim());
-
- // Get last N error lines
- const recentErrors = lines.slice(-MAX_ERROR_LOGS).map((line) => {
- try {
- const parsed = JSON.parse(line);
- const timestamp = new Date(parsed.timestamp).toLocaleTimeString();
- return `[${timestamp}] ${parsed.message}`;
- } catch {
- return line.substring(0, 80);
- }
- });
-
- return {
- recent: recentErrors,
- count: lines.length,
- };
- } catch (error) {
- logger.warn('[Dashboard] Failed to read error logs', { error });
- return { recent: [], count: 0 };
- }
-}
-
-// ============================================================================
-// Dashboard Rendering
-// ============================================================================
-
-/**
- * Render the dashboard UI
- */
-function renderDashboard(metrics: DashboardMetrics): string {
- const output: string[] = [];
-
- // Clear screen
- output.push('\x1B[2J\x1B[0f');
-
- // Header
- output.push(
- boxen(chalk.bold.cyan('📊 MeMesh Dashboard - Real-time Session Health'), {
- padding: 1,
- margin: 1,
- borderStyle: 'round',
- borderColor: 'cyan',
- })
- );
-
- // System Health Section
- output.push(renderSystemHealth(metrics.systemHealth));
-
- // Memory Statistics Section
- output.push(renderMemoryStats(metrics.memoryStats));
-
- // Recent Activities Section
- output.push(renderRecentActivities(metrics.memoryStats.recentActivities));
-
- // Error Summary Section
- if (metrics.errorSummary.count > 0) {
- output.push(renderErrorSummary(metrics.errorSummary));
- }
-
- // Footer with controls and uptime
- output.push(renderFooter(metrics.uptime));
-
- return output.join('\n');
-}
-
-/**
- * Render system health status
- */
-function renderSystemHealth(health: SystemHealth): string {
- const table = new Table({
- head: [chalk.bold('Component'), chalk.bold('Status'), chalk.bold('Message'), chalk.bold('Duration')],
- colWidths: [20, 15, 50, 15],
- style: {
- head: ['cyan'],
- border: ['gray'],
- },
- });
-
- for (const component of health.components) {
- const statusIcon = getStatusIcon(component.status);
- const statusText = getStatusText(component.status);
-
- table.push([
- component.name,
- statusIcon + ' ' + statusText,
- component.message.substring(0, 47) + (component.message.length > 47 ? '...' : ''),
- `${component.durationMs}ms`,
- ]);
- }
-
- const overallStatus = getOverallStatusBadge(health);
-
- return (
- '\n' +
- chalk.bold('🏥 System Health') +
- ' ' +
- overallStatus +
- '\n' +
- table.toString() +
- '\n' +
- chalk.dim(`Total check duration: ${health.totalDurationMs}ms`)
- );
-}
-
-/**
- * Render memory statistics
- */
-function renderMemoryStats(stats: DashboardMetrics['memoryStats']): string {
- const table = new Table({
- head: [chalk.bold('Metric'), chalk.bold('Value')],
- colWidths: [30, 50],
- style: {
- head: ['cyan'],
- border: ['gray'],
- },
- });
-
- table.push(['Total Memory Entries', String(stats.totalEntities)]);
- table.push(['Database Size', stats.storageSize]);
-
- const memUsage = process.memoryUsage();
- const heapUsedMB = Math.round(memUsage.heapUsed / 1024 / 1024);
- const heapTotalMB = Math.round(memUsage.heapTotal / 1024 / 1024);
- const usagePercent = Math.round((memUsage.heapUsed / memUsage.heapTotal) * 100);
-
- const memoryBar = renderMemoryBar(usagePercent);
- table.push(['Process Memory', `${heapUsedMB}MB / ${heapTotalMB}MB (${usagePercent}%) ${memoryBar}`]);
-
- return '\n' + chalk.bold('🧠 Memory Statistics') + '\n' + table.toString();
-}
-
-/**
- * Render memory usage bar
- */
-function renderMemoryBar(percent: number): string {
- const barLength = 20;
- const filled = Math.round((percent / 100) * barLength);
- const empty = barLength - filled;
-
- let color = chalk.green;
- if (percent > 90) color = chalk.red;
- else if (percent > 75) color = chalk.yellow;
-
- return color('█'.repeat(filled)) + chalk.dim('░'.repeat(empty));
-}
-
-/**
- * Render recent activities
- */
-function renderRecentActivities(activities: string[]): string {
- if (activities.length === 0) {
- return '\n' + chalk.bold('📝 Recent Activities') + '\n' + chalk.dim(' No recent activities');
- }
-
- const lines = activities.map((activity, index) => {
- return chalk.dim(` ${index + 1}.`) + ' ' + activity;
- });
-
- return '\n' + chalk.bold('📝 Recent Activities') + ' ' + chalk.dim(`(Last ${activities.length})`) + '\n' + lines.join('\n');
-}
-
-/**
- * Render error summary
- */
-function renderErrorSummary(summary: DashboardMetrics['errorSummary']): string {
- if (summary.recent.length === 0) {
- return '';
- }
-
- const lines = summary.recent.map((error) => {
- return chalk.red(' ✗ ') + chalk.dim(error);
- });
-
- return (
- '\n' +
- chalk.bold.red('⚠️ Error Summary') +
- ' ' +
- chalk.dim(`(${summary.count} total errors)`) +
- '\n' +
- lines.join('\n')
- );
-}
-
-/**
- * Render footer with controls and uptime
- */
-function renderFooter(uptime: number): string {
- const hours = Math.floor(uptime / 3600);
- const minutes = Math.floor((uptime % 3600) / 60);
- const seconds = uptime % 60;
- const uptimeStr = `${hours}h ${minutes}m ${seconds}s`;
-
- return (
- '\n' +
- boxen(
- chalk.dim('Press ') +
- chalk.bold.cyan('r') +
- chalk.dim(' to refresh | ') +
- chalk.bold.cyan('q') +
- chalk.dim(' to quit') +
- '\n' +
- chalk.dim('Auto-refresh every 5 seconds | Uptime: ') +
- chalk.cyan(uptimeStr),
- {
- padding: { top: 0, bottom: 0, left: 2, right: 2 },
- borderStyle: 'round',
- borderColor: 'gray',
- dimBorder: true,
- }
- )
- );
-}
-
-// ============================================================================
-// Helper Functions
-// ============================================================================
-
-/**
- * Get status icon
- */
-function getStatusIcon(status: ComponentHealth['status']): string {
- switch (status) {
- case 'healthy':
- return chalk.green('✓');
- case 'degraded':
- return chalk.yellow('⚠');
- case 'unhealthy':
- return chalk.red('✗');
- case 'unknown':
- return chalk.gray('?');
- }
-}
-
-/**
- * Get status text with color
- */
-function getStatusText(status: ComponentHealth['status']): string {
- switch (status) {
- case 'healthy':
- return chalk.green('Healthy');
- case 'degraded':
- return chalk.yellow('Degraded');
- case 'unhealthy':
- return chalk.red('Unhealthy');
- case 'unknown':
- return chalk.gray('Unknown');
- }
-}
-
-/**
- * Get overall status badge
- */
-function getOverallStatusBadge(health: SystemHealth): string {
- const icon = getStatusIcon(health.status);
- const text = health.summary;
-
- let badge = '';
- switch (health.status) {
- case 'healthy':
- badge = chalk.green(`${icon} ${text}`);
- break;
- case 'degraded':
- badge = chalk.yellow(`${icon} ${text}`);
- break;
- case 'unhealthy':
- badge = chalk.red(`${icon} ${text}`);
- break;
- default:
- badge = chalk.gray(`${icon} ${text}`);
- }
-
- return badge;
-}
-
-// ============================================================================
-// Dashboard Main Loop
-// ============================================================================
-
-/**
- * Run the interactive dashboard
- */
-export async function runDashboard(): Promise {
- console.log(chalk.cyan('\n📊 Initializing MeMesh Dashboard...\n'));
-
- let isRunning = true;
- let refreshTimer: NodeJS.Timeout | null = null;
-
- // Setup keyboard input
- if (process.stdin.isTTY) {
- const readline = createInterface({
- input: process.stdin,
- output: process.stdout,
- });
-
- // Enable raw mode for single-key input
- process.stdin.setRawMode(true);
- process.stdin.resume();
-
- process.stdin.on('data', (key: Buffer) => {
- const char = key.toString();
-
- if (char === 'q' || char === '\u0003') {
- // 'q' or Ctrl+C
- isRunning = false;
- if (refreshTimer) clearTimeout(refreshTimer);
- process.stdin.setRawMode(false);
- readline.close();
- console.log(chalk.yellow('\n👋 Dashboard closed.\n'));
- process.exit(0);
- } else if (char === 'r') {
- // Manual refresh
- refresh();
- }
- });
- }
-
- // Refresh function
- async function refresh(): Promise {
- if (!isRunning) return;
-
- try {
- const metrics = await collectMetrics();
- const display = renderDashboard(metrics);
- console.log(display);
-
- // Schedule next refresh
- if (isRunning) {
- refreshTimer = setTimeout(refresh, REFRESH_INTERVAL);
- }
- } catch (error) {
- logger.error('[Dashboard] Refresh failed', { error });
- console.error(chalk.red('\n❌ Failed to refresh dashboard:'), error);
-
- if (isRunning) {
- refreshTimer = setTimeout(refresh, REFRESH_INTERVAL);
- }
- }
- }
-
- // Initial render
- await refresh();
-}
diff --git a/src/cli/index.ts b/src/cli/index.ts
deleted file mode 100644
index 2e9c6d5d..00000000
--- a/src/cli/index.ts
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * MeMesh CLI Entry Point
- *
- * Handles user-facing CLI commands:
- * - memesh setup Interactive configuration wizard
- * - memesh tutorial Interactive tutorial
- * - memesh dashboard Session health dashboard
- * - memesh stats Usage statistics
- * - memesh report-issue Bug reporting
- * - memesh config Configuration management
- * - memesh --version Show version
- * - memesh --help Show help
- *
- * Note: When run without arguments (via MCP), starts MCP server
- */
-
-import { Command } from 'commander';
-import chalk from 'chalk';
-import { runSetupWizard } from './setup-wizard.js';
-import { runTutorial } from './tutorial.js';
-import { runDashboard } from './dashboard.js';
-import { runStats } from './stats.js';
-import { showConfig, validateConfig, editConfig, resetConfig } from './config.js';
-import { createDaemonCommand } from './daemon.js';
-import { registerLoginCommand } from './login.js';
-import { registerLogoutCommand } from './logout.js';
-import { logger } from '../utils/logger.js';
-
-// Read version from package.json
-import { createRequire } from 'module';
-const require = createRequire(import.meta.url);
-const packageJson = require('../../package.json');
-
-const program = new Command();
-
-program
- .name('memesh')
- .description('MeMesh — Persistent memory plugin for Claude Code')
- .version(packageJson.version, '-v, --version', 'Show version number');
-
-// Setup command
-program
- .command('setup')
- .description('Interactive configuration wizard')
- .action(async () => {
- try {
- await runSetupWizard();
- } catch (error) {
- logger.error('Setup wizard failed', { error });
- console.error(chalk.red('Setup failed:'), error);
- process.exit(1);
- }
- });
-
-// Tutorial command
-program
- .command('tutorial')
- .description('Interactive 5-minute tutorial')
- .action(async () => {
- try {
- await runTutorial();
- } catch (error) {
- logger.error('Tutorial failed', { error });
- console.error(chalk.red('Tutorial failed:'), error);
- process.exit(1);
- }
- });
-
-// Dashboard command
-program
- .command('dashboard')
- .description('View session health dashboard with real-time monitoring')
- .action(async () => {
- try {
- await runDashboard();
- } catch (error) {
- logger.error('Dashboard failed', { error });
- console.error(chalk.red('Dashboard failed:'), error);
- process.exit(1);
- }
- });
-
-// Stats command
-program
- .command('stats')
- .description('View usage statistics and analytics')
- .option('-d, --day', 'Show last 24 hours')
- .option('-w, --week', 'Show last 7 days')
- .option('-m, --month', 'Show last 30 days')
- .option('-a, --all', 'Show all time (default)')
- .option('--json', 'Export as JSON')
- .option('--csv', 'Export as CSV')
- .option('-v, --verbose', 'Show detailed statistics')
- .action(async (options) => {
- try {
- // Determine time range
- let range: 'day' | 'week' | 'month' | 'all' = 'all';
- if (options.day) range = 'day';
- else if (options.week) range = 'week';
- else if (options.month) range = 'month';
-
- // Determine export format
- let exportFormat: 'json' | 'csv' | undefined;
- if (options.json) exportFormat = 'json';
- else if (options.csv) exportFormat = 'csv';
-
- await runStats({
- range,
- export: exportFormat,
- verbose: options.verbose,
- });
- } catch (error) {
- logger.error('Stats command failed', { error });
- console.error(chalk.red('Stats failed:'), error);
- process.exit(1);
- }
- });
-
-// Report issue command (placeholder)
-program
- .command('report-issue')
- .description('Report a bug or issue')
- .action(() => {
- console.log(chalk.yellow('\n🐛 Report Issue'));
- console.log(chalk.dim('For now, please report issues at:'));
- console.log(
- chalk.cyan('https://github.com/PCIRCLE-AI/claude-code-buddy/issues\n')
- );
- });
-
-// Config commands
-const config = program
- .command('config')
- .description('Manage MeMesh configuration');
-
-config
- .command('show')
- .description('Show current configuration')
- .action(async () => {
- try {
- await showConfig();
- } catch (error) {
- logger.error('Failed to show config', { error });
- console.error(chalk.red('Failed to show configuration:'), error);
- process.exit(1);
- }
- });
-
-config
- .command('validate')
- .description('Validate MCP configuration')
- .action(async () => {
- try {
- await validateConfig();
- } catch (error) {
- logger.error('Failed to validate config', { error });
- console.error(chalk.red('Failed to validate configuration:'), error);
- process.exit(1);
- }
- });
-
-config
- .command('edit')
- .description('Edit configuration in default editor')
- .action(async () => {
- try {
- await editConfig();
- } catch (error) {
- logger.error('Failed to edit config', { error });
- console.error(chalk.red('Failed to edit configuration:'), error);
- process.exit(1);
- }
- });
-
-config
- .command('reset')
- .description('Reset configuration to defaults')
- .action(async () => {
- try {
- await resetConfig();
- } catch (error) {
- logger.error('Failed to reset config', { error });
- console.error(chalk.red('Failed to reset configuration:'), error);
- process.exit(1);
- }
- });
-
-// Auth commands
-registerLoginCommand(program);
-registerLogoutCommand(program);
-
-// Daemon commands
-program.addCommand(createDaemonCommand());
-
-// Help command (override default to show better format)
-program.on('--help', () => {
- console.log('');
- console.log(chalk.bold('Examples:'));
- console.log(' $ memesh login # Login to MeMesh Cloud');
- console.log(' $ memesh logout # Remove stored credentials');
- console.log(' $ memesh setup # Configure MeMesh interactively');
- console.log(' $ memesh tutorial # Learn MeMesh in 5 minutes');
- console.log(' $ memesh dashboard # View session health');
- console.log(' $ memesh daemon status # Check daemon status');
- console.log(' $ memesh daemon logs -f # Follow daemon logs');
- console.log('');
- console.log(chalk.bold('Documentation:'));
- console.log(' Quick Start: https://github.com/PCIRCLE-AI/claude-code-buddy#quick-start');
- console.log(' User Guide: https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/USER_GUIDE.md');
- console.log('');
- console.log(chalk.bold('Support:'));
- console.log(' Issues: https://github.com/PCIRCLE-AI/claude-code-buddy/issues');
- console.log(' Discussions: https://github.com/PCIRCLE-AI/claude-code-buddy/discussions');
- console.log('');
-});
-
-/**
- * Run the CLI program
- */
-export async function runCLI(): Promise {
- // Parse arguments
- program.parse(process.argv);
-
- // If no command specified, show help
- if (!process.argv.slice(2).length) {
- program.outputHelp();
- }
-}
diff --git a/src/cli/login.ts b/src/cli/login.ts
deleted file mode 100644
index c080e1f9..00000000
--- a/src/cli/login.ts
+++ /dev/null
@@ -1,325 +0,0 @@
-/**
- * MeMesh Login Command
- *
- * OAuth 2.0 Device Authorization Flow for CLI authentication.
- * Opens browser for user approval, polls for API key, stores locally.
- */
-
-import { Command } from 'commander';
-import chalk from 'chalk';
-import * as readline from 'readline';
-import { saveCredentials, loadCredentials } from './credentials.js';
-import { logger } from '../utils/logger.js';
-
-const DEFAULT_BACKEND_URL = 'https://api.memesh.ai';
-
-interface DeviceAuthResponse {
- device_code: string;
- user_code: string;
- verification_uri: string;
- verification_uri_complete: string;
- expires_in: number;
- interval: number;
-}
-
-interface TokenResponse {
- api_key: string;
- token_type: string;
-}
-
-interface OAuth2Error {
- error: string;
- error_description?: string;
-}
-
-/**
- * Open URL in default browser (cross-platform)
- * Uses spawn with argument arrays to prevent command injection.
- */
-async function openBrowser(url: string): Promise {
- // Validate URL before opening - only allow HTTP(S)
- try {
- const parsed = new URL(url);
- if (parsed.protocol !== 'https:' && parsed.protocol !== 'http:') {
- return; // Only open HTTP(S) URLs
- }
- } catch {
- return; // Invalid URL
- }
-
- const { spawn } = await import('child_process');
- const platform = process.platform;
- try {
- if (platform === 'darwin') {
- spawn('open', [url], { stdio: 'ignore', detached: true }).unref();
- } else if (platform === 'win32') {
- spawn('cmd', ['/c', 'start', '', url], { stdio: 'ignore', detached: true }).unref();
- } else {
- spawn('xdg-open', [url], { stdio: 'ignore', detached: true }).unref();
- }
- } catch {
- // Silently fail - user can manually open URL
- }
-}
-
-/**
- * Device flow login - opens browser, polls for approval
- */
-async function deviceFlowLogin(backendUrl: string): Promise {
- console.log(chalk.bold('\nMeMesh Cloud Login\n'));
-
- // Check existing credentials
- const existing = loadCredentials();
- if (existing) {
- console.log(chalk.yellow('You are already logged in.'));
- console.log(chalk.dim('Run `memesh logout` first to log out, or use --manual to enter a new key.\n'));
- return;
- }
-
- // Step 1: Initiate device authorization
- console.log(chalk.dim('Initiating device authorization...'));
-
- let deviceAuth: DeviceAuthResponse;
- try {
- const response = await fetch(`${backendUrl}/auth/device`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({ client_id: 'memesh-cli' }),
- });
-
- if (!response.ok) {
- const error = await response.json() as OAuth2Error;
- throw new Error(error.error_description || 'Failed to initiate device authorization');
- }
-
- deviceAuth = await response.json() as DeviceAuthResponse;
- } catch (error) {
- console.error(chalk.red('Failed to connect to MeMesh Cloud.'));
- console.error(chalk.dim(error instanceof Error ? error.message : String(error)));
- process.exit(1);
- }
-
- // Step 2: Display code and open browser
- console.log('');
- console.log(chalk.bold(' Your code: ') + chalk.cyan.bold(deviceAuth.user_code));
- console.log('');
- console.log(chalk.dim(' Opening browser to approve...'));
- console.log(chalk.dim(` ${deviceAuth.verification_uri_complete}`));
- console.log('');
-
- await openBrowser(deviceAuth.verification_uri_complete);
-
- // Step 3: Poll for token
- console.log(chalk.dim(' Waiting for approval... (Ctrl+C to cancel)'));
-
- const startTime = Date.now();
- const expiresMs = deviceAuth.expires_in * 1000;
- let interval = deviceAuth.interval * 1000;
-
- // Allow clean Ctrl+C cancellation during polling
- let cancelled = false;
- const sigintHandler = () => {
- cancelled = true;
- console.log('\n\n Login cancelled.\n');
- process.exit(130);
- };
- process.on('SIGINT', sigintHandler);
-
- try {
- while (!cancelled && Date.now() - startTime < expiresMs) {
- await new Promise(resolve => setTimeout(resolve, interval));
-
- try {
- const response = await fetch(`${backendUrl}/auth/device/token`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
- device_code: deviceAuth.device_code,
- }),
- });
-
- if (response.ok) {
- const tokenData = await response.json() as TokenResponse;
-
- // Save credentials
- saveCredentials({
- apiKey: tokenData.api_key,
- baseUrl: backendUrl !== DEFAULT_BACKEND_URL ? backendUrl : undefined,
- createdAt: new Date().toISOString(),
- });
-
- console.log(chalk.green.bold('\n ✓ Login successful!\n'));
- console.log(chalk.dim(' API key stored in ~/.config/memesh/credentials.json'));
- console.log(chalk.dim(' MCP tools will automatically use this key.\n'));
- return;
- }
-
- // Handle OAuth2 errors
- const error = await response.json() as OAuth2Error;
-
- if (error.error === 'authorization_pending') {
- // Still waiting - continue polling (show dots for progress)
- process.stdout.write(chalk.dim('.'));
- continue;
- }
-
- if (error.error === 'slow_down') {
- interval += 5000; // Back off by 5 seconds
- continue;
- }
-
- if (error.error === 'access_denied') {
- console.log(chalk.red('\n ✗ Authorization denied.\n'));
- process.exit(1);
- }
-
- if (error.error === 'expired_token') {
- console.log(chalk.red('\n ✗ Authorization expired. Please try again.\n'));
- process.exit(1);
- }
-
- // Unknown error
- console.error(chalk.red(`\n Error: ${error.error_description || error.error}\n`));
- process.exit(1);
-
- } catch (error) {
- // Network error during polling - retry
- logger.debug('Polling error', { error: String(error) });
- continue;
- }
- }
- } finally {
- process.removeListener('SIGINT', sigintHandler);
- }
-
- console.log(chalk.red('\n ✗ Authorization timed out. Please try again.\n'));
- process.exit(1);
-}
-
-/**
- * Read API key from stdin securely (input is hidden).
- * Uses readline with _writeToOutput override to mask input,
- * preventing the key from appearing in process list (ps aux).
- * Requires an interactive terminal (TTY).
- */
-export function readApiKeyFromStdin(): Promise {
- // Require interactive terminal for secure input
- if (!process.stdin.isTTY) {
- console.error(chalk.red('Error: --manual requires an interactive terminal.'));
- console.error(chalk.dim('Use `memesh login` for browser-based device flow instead.'));
- process.exit(1);
- }
-
- return new Promise((resolve, reject) => {
- const rl = readline.createInterface({
- input: process.stdin,
- output: process.stdout,
- terminal: true,
- });
-
- // Handle Ctrl+C gracefully during input
- const sigintHandler = () => {
- rl.close();
- console.log('\n\n Login cancelled.\n');
- process.exit(130);
- };
- process.on('SIGINT', sigintHandler);
-
- // Override output to hide typed characters
- const originalWrite = (rl as any)._writeToOutput;
- (rl as any)._writeToOutput = function (stringToWrite: string) {
- // Only show the prompt, mask everything else
- if (stringToWrite.includes('Enter API key:')) {
- originalWrite.call(rl, stringToWrite);
- } else {
- originalWrite.call(rl, '*');
- }
- };
-
- rl.question(' Enter API key: ', (answer) => {
- process.removeListener('SIGINT', sigintHandler);
- rl.close();
- console.log(''); // newline after hidden input
- resolve(answer.trim());
- });
-
- rl.on('error', (err) => {
- process.removeListener('SIGINT', sigintHandler);
- rl.close();
- reject(err);
- });
- });
-}
-
-/**
- * Manual API key login via secure stdin input
- */
-async function manualKeyLogin(backendUrl: string): Promise {
- console.log(chalk.bold('\nMeMesh Cloud Login (Manual API Key)\n'));
- console.log(chalk.dim(' Your input will be hidden for security.\n'));
-
- const apiKey = await readApiKeyFromStdin();
-
- if (!apiKey) {
- console.error(chalk.red('No API key entered.'));
- process.exit(1);
- }
-
- // Validate key format: prefix + minimum length + alphanumeric/underscore only
- const API_KEY_PATTERN = /^sk_memmesh_[a-zA-Z0-9_]{20,}$/;
- if (!API_KEY_PATTERN.test(apiKey)) {
- console.error(chalk.red('Invalid API key format. Expected: sk_memmesh_ (alphanumeric, min 20 chars after prefix)'));
- process.exit(1);
- }
-
- // Test the key
- console.log(chalk.dim('Verifying API key...'));
- try {
- const response = await fetch(`${backendUrl}/agents/me`, {
- headers: { 'x-api-key': apiKey },
- });
-
- if (!response.ok) {
- console.error(chalk.red('Invalid API key. Please check and try again.'));
- process.exit(1);
- }
- } catch {
- console.error(chalk.red('Failed to connect to MeMesh Cloud. Please check your network.'));
- process.exit(1);
- }
-
- // Save credentials
- saveCredentials({
- apiKey,
- baseUrl: backendUrl !== DEFAULT_BACKEND_URL ? backendUrl : undefined,
- createdAt: new Date().toISOString(),
- });
-
- console.log(chalk.green.bold(' ✓ Login successful!\n'));
- console.log(chalk.dim(' API key stored in ~/.config/memesh/credentials.json\n'));
-}
-
-/**
- * Register login command with Commander
- */
-export function registerLoginCommand(program: Command): void {
- program
- .command('login')
- .description('Authenticate with MeMesh Cloud')
- .option('--manual', 'Enter API key manually (secure stdin input)')
- .option('--backend-url ', 'Backend URL', DEFAULT_BACKEND_URL)
- .action(async (options: { manual?: boolean; backendUrl: string }) => {
- try {
- if (options.manual) {
- await manualKeyLogin(options.backendUrl);
- } else {
- await deviceFlowLogin(options.backendUrl);
- }
- } catch (error) {
- logger.error('Login failed', { error });
- console.error(chalk.red('Login failed:'), error instanceof Error ? error.message : error);
- process.exit(1);
- }
- });
-}
diff --git a/src/cli/logout.ts b/src/cli/logout.ts
deleted file mode 100644
index 75ee2f26..00000000
--- a/src/cli/logout.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * MeMesh Logout Command
- */
-
-import { Command } from 'commander';
-import chalk from 'chalk';
-import { deleteCredentials, loadCredentials } from './credentials.js';
-
-export function registerLogoutCommand(program: Command): void {
- program
- .command('logout')
- .description('Remove stored MeMesh credentials')
- .action(() => {
- const existing = loadCredentials();
- if (!existing) {
- console.log(chalk.yellow('\nNot currently logged in.\n'));
- return;
- }
-
- const deleted = deleteCredentials();
- if (deleted) {
- console.log(chalk.green('\n ✓ Logged out. Credentials removed.\n'));
- } else {
- console.error(chalk.red('\nFailed to remove credentials.\n'));
- process.exit(1);
- }
- });
-}
diff --git a/src/cli/setup-wizard.ts b/src/cli/setup-wizard.ts
deleted file mode 100644
index 2867b944..00000000
--- a/src/cli/setup-wizard.ts
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * Interactive Setup Wizard for MeMesh
- *
- * Eliminates the #1 pain point: manual JSON configuration errors
- *
- * Features:
- * - Auto-detect Claude Code installation
- * - Generate MCP configuration automatically
- * - Validate connection
- * - Clear success/failure messages
- * - Progress indicators
- *
- * Expected outcome:
- * - Setup success rate: 70% → 95%
- * - Time to first command: 15min → <5min
- * - Zero configuration errors
- */
-
-import inquirer from 'inquirer';
-import chalk from 'chalk';
-import boxen from 'boxen';
-import fs from 'fs-extra';
-import path from 'path';
-import os from 'os';
-import { ProgressIndicator } from '../ui/ProgressIndicator.js';
-import { logger } from '../utils/logger.js';
-
-interface SetupConfig {
- claudeCodePath?: string;
- mcpConfigPath: string;
- autoDetected: boolean;
-}
-
-export class SetupWizard {
- private progress: ProgressIndicator;
-
- constructor() {
- this.progress = new ProgressIndicator();
- }
-
- /**
- * Run the complete setup wizard
- */
- async run(): Promise {
- console.log(
- boxen(chalk.bold.cyan('🚀 MeMesh Configuration Wizard'), {
- padding: 1,
- borderColor: 'cyan',
- borderStyle: 'round',
- })
- );
-
- console.log(
- chalk.dim('\nThis wizard will help you configure MeMesh in 3 easy steps.\n')
- );
-
- try {
- // Step 1: Detect environment
- const config = await this.detectEnvironment();
-
- // Step 2: Configure environment (.env)
- await this.configureEnvironment();
-
- // Step 3: Configure MCP
- const configured = await this.configureMCP(config);
-
- if (!configured) {
- console.log(chalk.yellow('\n⚠️ Setup cancelled'));
- return;
- }
-
- // Step 4: Validate connection
- await this.validateSetup(config);
-
- // Show success message
- this.showSuccessMessage();
- } catch (error) {
- this.handleError(error);
- }
- }
-
- /**
- * Step 1: Detect Claude Code installation and environment
- */
- private async detectEnvironment(): Promise {
- this.progress.start([
- 'Detecting Claude Code installation',
- 'Locating configuration directory',
- 'Checking existing configuration',
- ]);
-
- // Detect Claude Code
- const claudeCodePath = await this.findClaudeCode();
- this.progress.nextStep();
-
- // Find config directory
- const mcpConfigPath = this.getMCPConfigPath();
- this.progress.nextStep();
-
- // Check existing config
- const hasExisting = await fs.pathExists(mcpConfigPath);
- this.progress.nextStep();
-
- this.progress.complete('Environment detected');
-
- const config: SetupConfig = {
- claudeCodePath,
- mcpConfigPath,
- autoDetected: !!claudeCodePath,
- };
-
- // Show detection results
- console.log(chalk.bold('\n📋 Detection Results:\n'));
-
- if (claudeCodePath) {
- console.log(
- `${chalk.green('✓')} Claude Code: ${chalk.cyan(claudeCodePath)}`
- );
- } else {
- console.log(
- `${chalk.yellow('⚠')} Claude Code: ${chalk.dim('Not found (will configure manually)')}`
- );
- }
-
- console.log(
- `${chalk.green('✓')} Config directory: ${chalk.cyan(path.dirname(mcpConfigPath))}`
- );
-
- if (hasExisting) {
- console.log(
- `${chalk.yellow('⚠')} Existing config: ${chalk.dim('Will be backed up')}`
- );
- }
-
- return config;
- }
-
- /**
- * Step 2: Configure environment (.env)
- */
- private async configureEnvironment(): Promise {
- console.log(chalk.bold('\n🔧 Environment Configuration\n'));
-
- const projectRoot = process.cwd();
- const envPath = path.join(projectRoot, '.env');
- const envExamplePath = path.join(projectRoot, '.env.example');
-
- // Check if .env exists
- const hasEnv = await fs.pathExists(envPath);
-
- if (!hasEnv) {
- const { shouldCreateEnv } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'shouldCreateEnv',
- message: 'Create .env file from template?',
- default: true,
- },
- ]);
-
- if (shouldCreateEnv) {
- if (await fs.pathExists(envExamplePath)) {
- await fs.copy(envExamplePath, envPath);
- console.log(chalk.green(' ✓ Created .env from template'));
- } else {
- await fs.writeFile(envPath, '# MeMesh Configuration\n');
- console.log(chalk.green(' ✓ Created .env file'));
- }
- }
- } else {
- console.log(chalk.green(' ✓ .env file already exists'));
- }
-
- console.log('');
- }
-
- /**
- * Step 3: Configure MCP server
- */
- private async configureMCP(config: SetupConfig): Promise {
- console.log(chalk.bold('\n🔧 Configuration\n'));
-
- // Ask for confirmation
- const { shouldConfigure } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'shouldConfigure',
- message: 'Configure MeMesh MCP server automatically?',
- default: true,
- },
- ]);
-
- if (!shouldConfigure) {
- return false;
- }
-
- // Backup existing config if present
- if (await fs.pathExists(config.mcpConfigPath)) {
- const backupPath = `${config.mcpConfigPath}.backup-${Date.now()}`;
- await fs.copy(config.mcpConfigPath, backupPath);
- console.log(
- chalk.dim(` Backed up existing config to: ${path.basename(backupPath)}`)
- );
- }
-
- // Generate configuration
- const spinner = ProgressIndicator.simple('Generating MCP configuration...');
-
- try {
- const mcpConfig = this.generateMCPConfig();
-
- // Ensure directory exists
- await fs.ensureDir(path.dirname(config.mcpConfigPath));
-
- // Write config
- await fs.writeJSON(config.mcpConfigPath, mcpConfig, { spaces: 2 });
-
- spinner.succeed(chalk.green('MCP configuration generated'));
-
- return true;
- } catch (error) {
- spinner.fail(chalk.red('Failed to generate configuration'));
- throw error;
- }
- }
-
- /**
- * Step 4: Validate setup
- */
- private async validateSetup(config: SetupConfig): Promise {
- console.log(chalk.bold('\n🔍 Validation\n'));
-
- const spinner = ProgressIndicator.simple('Testing MCP configuration...');
-
- try {
- // Verify config file exists and is valid JSON
- const configContent = await fs.readJSON(config.mcpConfigPath);
-
- if (!configContent.mcpServers || !configContent.mcpServers.memesh) {
- throw new Error('Invalid MCP configuration structure');
- }
-
- spinner.succeed(chalk.green('Configuration is valid'));
-
- // Note: Actual MCP connection test would require Claude Code to be running
- console.log(
- chalk.dim('\n 💡 Connection test will be performed when Claude Code starts')
- );
- } catch (error) {
- spinner.fail(chalk.red('Configuration validation failed'));
- throw error;
- }
- }
-
- /**
- * Find Claude Code installation
- */
- private async findClaudeCode(): Promise {
- const platform = os.platform();
-
- const possiblePaths: string[] = [];
-
- if (platform === 'darwin') {
- // macOS
- possiblePaths.push(
- '/Applications/Claude Code.app',
- path.join(os.homedir(), 'Applications/Claude Code.app')
- );
- } else if (platform === 'win32') {
- // Windows
- possiblePaths.push(
- path.join(os.homedir(), 'AppData/Local/Programs/Claude Code/Claude Code.exe'),
- 'C:\\Program Files\\Claude Code\\Claude Code.exe'
- );
- } else {
- // Linux
- possiblePaths.push(
- '/usr/local/bin/claude-code',
- '/usr/bin/claude-code',
- path.join(os.homedir(), '.local/bin/claude-code')
- );
- }
-
- for (const possiblePath of possiblePaths) {
- if (await fs.pathExists(possiblePath)) {
- return possiblePath;
- }
- }
-
- return undefined;
- }
-
- /**
- * Get MCP configuration file path
- */
- private getMCPConfigPath(): string {
- const platform = os.platform();
- const homeDir = os.homedir();
-
- if (platform === 'darwin') {
- // macOS: ~/Library/Application Support/Claude/claude_desktop_config.json
- return path.join(
- homeDir,
- 'Library/Application Support/Claude/claude_desktop_config.json'
- );
- } else if (platform === 'win32') {
- // Windows: %APPDATA%\Claude\claude_desktop_config.json
- return path.join(process.env.APPDATA || '', 'Claude/claude_desktop_config.json');
- } else {
- // Linux: ~/.config/Claude/claude_desktop_config.json
- return path.join(homeDir, '.config/Claude/claude_desktop_config.json');
- }
- }
-
- /**
- * Generate MCP configuration
- */
- private generateMCPConfig(): any {
- const memeshPath = this.getMemeshExecutablePath();
-
- return {
- mcpServers: {
- memesh: {
- command: 'node',
- args: [memeshPath],
- env: {},
- },
- },
- };
- }
-
- /**
- * Get memesh executable path (dist/mcp/server-bootstrap.js)
- */
- private getMemeshExecutablePath(): string {
- // Try to find the globally installed package
- const globalNodeModules = this.getGlobalNodeModulesPath();
- const globalMemeshPath = path.join(
- globalNodeModules,
- '@pcircle/memesh/dist/mcp/server-bootstrap.js'
- );
-
- // Fallback to npx
- return globalMemeshPath;
- }
-
- /**
- * Get global node_modules path
- */
- private getGlobalNodeModulesPath(): string {
- const platform = os.platform();
-
- if (platform === 'win32') {
- return path.join(process.env.APPDATA || '', 'npm/node_modules');
- } else {
- // Unix-like (macOS, Linux)
- return '/usr/local/lib/node_modules';
- }
- }
-
- /**
- * Show success message with next steps
- */
- private showSuccessMessage(): void {
- const message = `
-${chalk.bold.green('✅ Setup Complete!')}
-
-MeMesh has been configured successfully.
-
-${chalk.bold('Next Steps:')}
-
-${chalk.cyan('1.')} Restart Claude Code
- ${chalk.dim('Close and reopen Claude Code to load MeMesh')}
-
-${chalk.cyan('2.')} Verify installation
- ${chalk.dim('In Claude Code, type: buddy-help')}
-
-${chalk.cyan('3.')} Try your first command
- ${chalk.dim('buddy-do "explain MeMesh features"')}
- ${chalk.dim('buddy-remember "project decisions"')}
-
-${chalk.bold('📖 Documentation:')}
- Quick Start: https://github.com/PCIRCLE-AI/claude-code-buddy#quick-start
- User Guide: https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/USER_GUIDE.md
- Tutorial: ${chalk.cyan('memesh tutorial')}
-
-${chalk.bold('🆘 Need Help?')}
- Issues: https://github.com/PCIRCLE-AI/claude-code-buddy/issues
- Report: ${chalk.cyan('memesh report-issue')}
-`;
-
- console.log(
- boxen(message, {
- padding: 1,
- borderColor: 'green',
- borderStyle: 'round',
- })
- );
- }
-
- /**
- * Handle setup errors
- */
- private handleError(error: unknown): void {
- const errorMessage =
- error instanceof Error ? error.message : String(error);
-
- logger.error('Setup failed', { error: errorMessage });
-
- const message = `
-${chalk.bold.red('❌ Setup Failed')}
-
-${chalk.dim('Error:')} ${errorMessage}
-
-${chalk.bold('Troubleshooting:')}
-
-${chalk.cyan('1.')} Check permissions
- ${chalk.dim('Ensure you can write to the config directory')}
-
-${chalk.cyan('2.')} Manual configuration
- ${chalk.dim('See: https://github.com/PCIRCLE-AI/claude-code-buddy/blob/main/docs/QUICK_INSTALL.md')}
-
-${chalk.cyan('3.')} Get help
- ${chalk.dim('Report issue: memesh report-issue')}
- ${chalk.dim('GitHub: github.com/PCIRCLE-AI/claude-code-buddy/issues')}
-`;
-
- console.log(
- boxen(message, {
- padding: 1,
- borderColor: 'red',
- borderStyle: 'round',
- })
- );
- }
-}
-
-/**
- * Run the setup wizard
- */
-export async function runSetupWizard(): Promise {
- const wizard = new SetupWizard();
- await wizard.run();
-}
diff --git a/src/cli/stats.ts b/src/cli/stats.ts
deleted file mode 100644
index 4e216b0e..00000000
--- a/src/cli/stats.ts
+++ /dev/null
@@ -1,713 +0,0 @@
-/**
- * MeMesh Stats Command - Usage Statistics and Analytics
- *
- * Provides comprehensive analytics:
- * - Command usage frequency (buddy-do, buddy-remember)
- * - Entity and relation growth over time
- * - Popular entity types and search patterns
- * - Memory health metrics
- * - Visual charts and trends
- */
-
-import chalk from 'chalk';
-import Table from 'cli-table3';
-import * as asciichart from 'asciichart';
-import { KnowledgeGraph } from '../knowledge-graph/index.js';
-import type { EntityType } from '../knowledge-graph/types.js';
-import { logger } from '../utils/logger.js';
-
-interface TimeRange {
- name: string;
- days: number;
-}
-
-interface EntityStats {
- type: string;
- count: number;
- percentage: number;
-}
-
-interface TimeSeriesData {
- date: string;
- count: number;
-}
-
-interface CommandUsage {
- command: string;
- count: number;
- percentage: number;
-}
-
-interface StatsOptions {
- range?: 'day' | 'week' | 'month' | 'all';
- export?: 'json' | 'csv';
- verbose?: boolean;
-}
-
-export class StatsCommand {
- private kg: KnowledgeGraph;
-
- private constructor(kg: KnowledgeGraph) {
- this.kg = kg;
- }
-
- /**
- * Create StatsCommand instance
- */
- static async create(): Promise {
- const kg = await KnowledgeGraph.create();
- return new StatsCommand(kg);
- }
-
- /**
- * Run stats command
- */
- async run(options: StatsOptions = {}): Promise {
- const range = options.range || 'all';
-
- try {
- console.log(chalk.bold.cyan('\n📊 MeMesh Usage Statistics\n'));
-
- // Get time range for filtering
- const timeRange = this.getTimeRange(range);
-
- // Collect all statistics
- const [
- totalEntities,
- totalRelations,
- entityBreakdown,
- growthData,
- topTypes,
- recentActivity,
- healthMetrics,
- ] = await Promise.all([
- this.getTotalEntities(timeRange),
- this.getTotalRelations(timeRange),
- this.getEntityBreakdown(timeRange),
- this.getGrowthData(timeRange),
- this.getTopEntityTypes(timeRange, 5),
- this.getRecentActivity(7),
- this.getHealthMetrics(),
- ]);
-
- // Display based on export format
- if (options.export === 'json') {
- this.exportJSON({
- timeRange: range,
- totalEntities,
- totalRelations,
- entityBreakdown,
- growthData,
- topTypes,
- recentActivity,
- healthMetrics,
- });
- } else if (options.export === 'csv') {
- this.exportCSV({
- entityBreakdown,
- growthData,
- topTypes,
- });
- } else {
- // Display formatted output
- this.displayOverview(totalEntities, totalRelations, range);
- this.displayEntityBreakdown(entityBreakdown);
- this.displayGrowthChart(growthData, range);
- this.displayTopTypes(topTypes);
- this.displayRecentActivity(recentActivity);
- this.displayHealthMetrics(healthMetrics);
-
- if (options.verbose) {
- await this.displayVerboseStats(timeRange);
- }
- }
- } catch (error) {
- logger.error('Stats command failed', { error });
- console.error(chalk.red('Failed to generate statistics:'), error);
- throw error;
- }
- }
-
- /**
- * Get time range for queries
- */
- private getTimeRange(range: string): TimeRange {
- const ranges: Record = {
- day: { name: 'Last 24 Hours', days: 1 },
- week: { name: 'Last 7 Days', days: 7 },
- month: { name: 'Last 30 Days', days: 30 },
- all: { name: 'All Time', days: -1 },
- };
-
- return ranges[range] || ranges.all;
- }
-
- /**
- * Get total entities count
- */
- private async getTotalEntities(timeRange: TimeRange): Promise {
- try {
- const query = timeRange.days === -1
- ? `SELECT COUNT(*) as count FROM entities`
- : `SELECT COUNT(*) as count FROM entities
- WHERE created_at >= datetime('now', '-${timeRange.days} days')`;
-
- const result = (this.kg as any).db.prepare(query).get() as { count: number };
- return result.count;
- } catch (error) {
- logger.error('Failed to get total entities', { error });
- return 0;
- }
- }
-
- /**
- * Get total relations count
- */
- private async getTotalRelations(timeRange: TimeRange): Promise {
- try {
- const query = timeRange.days === -1
- ? `SELECT COUNT(*) as count FROM relations`
- : `SELECT COUNT(*) as count FROM relations
- WHERE created_at >= datetime('now', '-${timeRange.days} days')`;
-
- const result = (this.kg as any).db.prepare(query).get() as { count: number };
- return result.count;
- } catch (error) {
- logger.error('Failed to get total relations', { error });
- return 0;
- }
- }
-
- /**
- * Get entity breakdown by type
- */
- private async getEntityBreakdown(timeRange: TimeRange): Promise {
- try {
- const query = timeRange.days === -1
- ? `SELECT type, COUNT(*) as count FROM entities GROUP BY type ORDER BY count DESC`
- : `SELECT type, COUNT(*) as count FROM entities
- WHERE created_at >= datetime('now', '-${timeRange.days} days')
- GROUP BY type ORDER BY count DESC`;
-
- const results = (this.kg as any).db.prepare(query).all() as Array<{ type: string; count: number }>;
- const total = results.reduce((sum, r) => sum + r.count, 0);
-
- return results.map(r => ({
- type: r.type,
- count: r.count,
- percentage: total > 0 ? (r.count / total) * 100 : 0,
- }));
- } catch (error) {
- logger.error('Failed to get entity breakdown', { error });
- return [];
- }
- }
-
- /**
- * Get growth data over time
- */
- private async getGrowthData(timeRange: TimeRange): Promise {
- try {
- const days = timeRange.days === -1 ? 30 : timeRange.days;
-
- const query = `
- SELECT
- DATE(created_at) as date,
- COUNT(*) as count
- FROM entities
- WHERE created_at >= datetime('now', '-${days} days')
- GROUP BY DATE(created_at)
- ORDER BY date ASC
- `;
-
- const results = (this.kg as any).db.prepare(query).all() as Array<{ date: string; count: number }>;
- return results;
- } catch (error) {
- logger.error('Failed to get growth data', { error });
- return [];
- }
- }
-
- /**
- * Get top entity types
- */
- private async getTopEntityTypes(timeRange: TimeRange, limit: number): Promise {
- const breakdown = await this.getEntityBreakdown(timeRange);
- return breakdown.slice(0, limit);
- }
-
- /**
- * Get recent activity (last N days)
- */
- private async getRecentActivity(days: number): Promise {
- return this.getGrowthData({ name: `Last ${days} Days`, days });
- }
-
- /**
- * Get health metrics
- */
- private async getHealthMetrics(): Promise<{
- totalEntities: number;
- totalRelations: number;
- avgObservationsPerEntity: number;
- entitiesWithTags: number;
- avgRelationsPerEntity: number;
- }> {
- try {
- const totalEntities = await this.getTotalEntities({ name: 'All Time', days: -1 });
- const totalRelations = await this.getTotalRelations({ name: 'All Time', days: -1 });
-
- const obsQuery = `SELECT AVG(obs_count) as avg FROM (
- SELECT entity_id, COUNT(*) as obs_count FROM observations GROUP BY entity_id
- )`;
- const obsResult = (this.kg as any).db.prepare(obsQuery).get() as { avg: number };
-
- const tagsQuery = `SELECT COUNT(DISTINCT entity_id) as count FROM tags`;
- const tagsResult = (this.kg as any).db.prepare(tagsQuery).get() as { count: number };
-
- return {
- totalEntities,
- totalRelations,
- avgObservationsPerEntity: obsResult.avg || 0,
- entitiesWithTags: tagsResult.count || 0,
- avgRelationsPerEntity: totalEntities > 0 ? totalRelations / totalEntities : 0,
- };
- } catch (error) {
- logger.error('Failed to get health metrics', { error });
- return {
- totalEntities: 0,
- totalRelations: 0,
- avgObservationsPerEntity: 0,
- entitiesWithTags: 0,
- avgRelationsPerEntity: 0,
- };
- }
- }
-
- /**
- * Display overview section
- */
- private displayOverview(totalEntities: number, totalRelations: number, range: string): void {
- const rangeLabels: Record = {
- day: 'Last 24 Hours',
- week: 'Last 7 Days',
- month: 'Last 30 Days',
- all: 'All Time',
- };
-
- console.log(chalk.bold('📈 Overview') + chalk.dim(` (${rangeLabels[range]})`));
- console.log('');
-
- const table = new Table({
- head: [chalk.cyan('Metric'), chalk.cyan('Value')],
- colWidths: [30, 15],
- style: { head: [], border: ['dim'] },
- });
-
- table.push(
- ['Total Entities', chalk.green(totalEntities.toLocaleString())],
- ['Total Relations', chalk.green(totalRelations.toLocaleString())],
- ['Knowledge Density', this.calculateDensity(totalEntities, totalRelations)]
- );
-
- console.log(table.toString());
- console.log('');
- }
-
- /**
- * Calculate knowledge density score
- */
- private calculateDensity(entities: number, relations: number): string {
- if (entities === 0) return chalk.dim('N/A');
-
- const ratio = relations / entities;
- let rating = '';
- let color = chalk.yellow;
-
- if (ratio >= 2) {
- rating = 'Excellent';
- color = chalk.green;
- } else if (ratio >= 1) {
- rating = 'Good';
- color = chalk.cyan;
- } else if (ratio >= 0.5) {
- rating = 'Fair';
- color = chalk.yellow;
- } else {
- rating = 'Low';
- color = chalk.red;
- }
-
- return color(`${ratio.toFixed(2)} (${rating})`);
- }
-
- /**
- * Display entity breakdown
- */
- private displayEntityBreakdown(breakdown: EntityStats[]): void {
- if (breakdown.length === 0) {
- console.log(chalk.dim('No entities found in this time range.\n'));
- return;
- }
-
- console.log(chalk.bold('📊 Entity Types'));
- console.log('');
-
- const table = new Table({
- head: [chalk.cyan('Type'), chalk.cyan('Count'), chalk.cyan('Percentage'), chalk.cyan('Bar')],
- colWidths: [25, 10, 12, 30],
- style: { head: [], border: ['dim'] },
- });
-
- breakdown.forEach(stat => {
- const barLength = Math.round((stat.percentage / 100) * 20);
- const bar = chalk.cyan('█'.repeat(barLength)) + chalk.dim('░'.repeat(20 - barLength));
-
- table.push([
- this.formatEntityType(stat.type),
- chalk.green(stat.count.toString()),
- chalk.yellow(stat.percentage.toFixed(1) + '%'),
- bar,
- ]);
- });
-
- console.log(table.toString());
- console.log('');
- }
-
- /**
- * Format entity type for display
- */
- private formatEntityType(type: string): string {
- return type
- .split('_')
- .map(word => word.charAt(0).toUpperCase() + word.slice(1))
- .join(' ');
- }
-
- /**
- * Display growth chart
- */
- private displayGrowthChart(data: TimeSeriesData[], range: string): void {
- if (data.length === 0) {
- console.log(chalk.dim('No growth data available.\n'));
- return;
- }
-
- console.log(chalk.bold('📈 Memory Growth'));
- console.log('');
-
- // Prepare data for chart
- const values = data.map(d => d.count);
- const maxValue = Math.max(...values);
-
- if (maxValue === 0) {
- console.log(chalk.dim('No entities created in this period.\n'));
- return;
- }
-
- // Generate ASCII chart
- const chart = asciichart.plot(values, {
- height: 10,
- format: (x: number) => (Number.isInteger(x) ? chalk.cyan(x.toFixed(0).padStart(4)) : ''),
- });
-
- console.log(chart);
- console.log('');
-
- // Display date range
- if (data.length > 0) {
- const firstDate = data[0].date;
- const lastDate = data[data.length - 1].date;
- console.log(chalk.dim(`${firstDate} to ${lastDate}`));
- }
-
- // Display statistics
- const total = values.reduce((sum, v) => sum + v, 0);
- const avg = total / values.length;
- console.log(chalk.dim(`Total: ${total} | Average: ${avg.toFixed(1)}/day | Peak: ${maxValue}`));
- console.log('');
- }
-
- /**
- * Display top entity types
- */
- private displayTopTypes(topTypes: EntityStats[]): void {
- if (topTypes.length === 0) return;
-
- console.log(chalk.bold('🏆 Most Used Entity Types'));
- console.log('');
-
- const table = new Table({
- head: [chalk.cyan('Rank'), chalk.cyan('Type'), chalk.cyan('Count'), chalk.cyan('Share')],
- colWidths: [8, 25, 10, 12],
- style: { head: [], border: ['dim'] },
- });
-
- topTypes.forEach((stat, index) => {
- const medal = index === 0 ? '🥇' : index === 1 ? '🥈' : index === 2 ? '🥉' : `${index + 1}.`;
- table.push([
- medal,
- this.formatEntityType(stat.type),
- chalk.green(stat.count.toString()),
- chalk.yellow(stat.percentage.toFixed(1) + '%'),
- ]);
- });
-
- console.log(table.toString());
- console.log('');
- }
-
- /**
- * Display recent activity
- */
- private displayRecentActivity(activity: TimeSeriesData[]): void {
- if (activity.length === 0) return;
-
- console.log(chalk.bold('⏱️ Recent Activity (Last 7 Days)'));
- console.log('');
-
- const table = new Table({
- head: [chalk.cyan('Date'), chalk.cyan('New Entities'), chalk.cyan('Activity')],
- colWidths: [15, 15, 30],
- style: { head: [], border: ['dim'] },
- });
-
- // Show last 7 days
- activity.slice(-7).forEach(day => {
- const barLength = Math.min(20, day.count);
- const bar = chalk.green('▓'.repeat(barLength)) + chalk.dim('░'.repeat(20 - barLength));
-
- table.push([
- chalk.cyan(day.date),
- chalk.green(day.count.toString()),
- bar,
- ]);
- });
-
- console.log(table.toString());
- console.log('');
- }
-
- /**
- * Display health metrics
- */
- private displayHealthMetrics(metrics: {
- totalEntities: number;
- totalRelations: number;
- avgObservationsPerEntity: number;
- entitiesWithTags: number;
- avgRelationsPerEntity: number;
- }): void {
- console.log(chalk.bold('💊 Knowledge Graph Health'));
- console.log('');
-
- const table = new Table({
- head: [chalk.cyan('Metric'), chalk.cyan('Value'), chalk.cyan('Status')],
- colWidths: [30, 15, 15],
- style: { head: [], border: ['dim'] },
- });
-
- // Avg observations per entity
- const obsStatus = metrics.avgObservationsPerEntity >= 3 ? chalk.green('✓ Good') :
- metrics.avgObservationsPerEntity >= 2 ? chalk.yellow('⚠ Fair') :
- chalk.red('✗ Low');
-
- // Entities with tags percentage
- const tagPercentage = metrics.totalEntities > 0
- ? (metrics.entitiesWithTags / metrics.totalEntities) * 100
- : 0;
- const tagStatus = tagPercentage >= 50 ? chalk.green('✓ Good') :
- tagPercentage >= 25 ? chalk.yellow('⚠ Fair') :
- chalk.red('✗ Low');
-
- // Avg relations per entity
- const relStatus = metrics.avgRelationsPerEntity >= 2 ? chalk.green('✓ Excellent') :
- metrics.avgRelationsPerEntity >= 1 ? chalk.cyan('✓ Good') :
- metrics.avgRelationsPerEntity >= 0.5 ? chalk.yellow('⚠ Fair') :
- chalk.red('✗ Low');
-
- table.push(
- [
- 'Avg Observations/Entity',
- chalk.green(metrics.avgObservationsPerEntity.toFixed(2)),
- obsStatus,
- ],
- [
- 'Entities with Tags',
- chalk.green(`${metrics.entitiesWithTags} (${tagPercentage.toFixed(1)}%)`),
- tagStatus,
- ],
- [
- 'Avg Relations/Entity',
- chalk.green(metrics.avgRelationsPerEntity.toFixed(2)),
- relStatus,
- ]
- );
-
- console.log(table.toString());
- console.log('');
-
- // Health score
- const healthScore = this.calculateHealthScore(metrics);
- console.log(chalk.bold('Overall Health: ') + healthScore);
- console.log('');
- }
-
- /**
- * Calculate overall health score
- */
- private calculateHealthScore(metrics: {
- avgObservationsPerEntity: number;
- entitiesWithTags: number;
- totalEntities: number;
- avgRelationsPerEntity: number;
- }): string {
- let score = 0;
-
- // Observations score (0-33)
- if (metrics.avgObservationsPerEntity >= 3) score += 33;
- else if (metrics.avgObservationsPerEntity >= 2) score += 22;
- else if (metrics.avgObservationsPerEntity >= 1) score += 11;
-
- // Tags score (0-33)
- const tagPercentage = metrics.totalEntities > 0
- ? (metrics.entitiesWithTags / metrics.totalEntities) * 100
- : 0;
- if (tagPercentage >= 50) score += 33;
- else if (tagPercentage >= 25) score += 22;
- else if (tagPercentage >= 10) score += 11;
-
- // Relations score (0-34)
- if (metrics.avgRelationsPerEntity >= 2) score += 34;
- else if (metrics.avgRelationsPerEntity >= 1) score += 23;
- else if (metrics.avgRelationsPerEntity >= 0.5) score += 12;
-
- // Color-code the score
- let color = chalk.red;
- let rating = 'Poor';
-
- if (score >= 80) {
- color = chalk.green;
- rating = 'Excellent';
- } else if (score >= 60) {
- color = chalk.cyan;
- rating = 'Good';
- } else if (score >= 40) {
- color = chalk.yellow;
- rating = 'Fair';
- }
-
- return color(`${score}/100 (${rating})`);
- }
-
- /**
- * Display verbose statistics
- */
- private async displayVerboseStats(timeRange: TimeRange): Promise {
- console.log(chalk.bold('🔍 Detailed Statistics'));
- console.log('');
-
- try {
- // Get relation types breakdown
- const relationQuery = timeRange.days === -1
- ? `SELECT relation_type, COUNT(*) as count FROM relations GROUP BY relation_type ORDER BY count DESC`
- : `SELECT relation_type, COUNT(*) as count FROM relations
- WHERE created_at >= datetime('now', '-${timeRange.days} days')
- GROUP BY relation_type ORDER BY count DESC`;
-
- const relations = (this.kg as any).db.prepare(relationQuery).all() as Array<{ relation_type: string; count: number }>;
-
- if (relations.length > 0) {
- const table = new Table({
- head: [chalk.cyan('Relation Type'), chalk.cyan('Count')],
- colWidths: [30, 15],
- style: { head: [], border: ['dim'] },
- });
-
- relations.forEach(r => {
- table.push([
- this.formatEntityType(r.relation_type),
- chalk.green(r.count.toString()),
- ]);
- });
-
- console.log(chalk.bold('Relation Types:'));
- console.log(table.toString());
- console.log('');
- }
-
- // Get most connected entities
- const connectedQuery = `
- SELECT e.name, e.type, COUNT(r.id) as connection_count
- FROM entities e
- LEFT JOIN relations r ON (e.id = r.from_entity_id OR e.id = r.to_entity_id)
- GROUP BY e.id
- ORDER BY connection_count DESC
- LIMIT 10
- `;
-
- const connected = (this.kg as any).db.prepare(connectedQuery).all() as Array<{
- name: string;
- type: string;
- connection_count: number
- }>;
-
- if (connected.length > 0) {
- const table = new Table({
- head: [chalk.cyan('Entity'), chalk.cyan('Type'), chalk.cyan('Connections')],
- colWidths: [30, 20, 15],
- style: { head: [], border: ['dim'] },
- });
-
- connected.forEach(e => {
- table.push([
- chalk.cyan(e.name.length > 28 ? e.name.slice(0, 25) + '...' : e.name),
- this.formatEntityType(e.type),
- chalk.green(e.connection_count.toString()),
- ]);
- });
-
- console.log(chalk.bold('Most Connected Entities:'));
- console.log(table.toString());
- console.log('');
- }
- } catch (error) {
- logger.error('Failed to display verbose stats', { error });
- }
- }
-
- /**
- * Export statistics as JSON
- */
- private exportJSON(data: any): void {
- console.log(JSON.stringify(data, null, 2));
- }
-
- /**
- * Export statistics as CSV
- */
- private exportCSV(data: {
- entityBreakdown: EntityStats[];
- growthData: TimeSeriesData[];
- topTypes: EntityStats[];
- }): void {
- console.log('Entity Type,Count,Percentage');
- data.entityBreakdown.forEach(stat => {
- console.log(`${stat.type},${stat.count},${stat.percentage.toFixed(2)}`);
- });
-
- console.log('\nDate,Count');
- data.growthData.forEach(point => {
- console.log(`${point.date},${point.count}`);
- });
- }
-}
-
-/**
- * Run stats command (exported for CLI use)
- */
-export async function runStats(options: StatsOptions = {}): Promise {
- const stats = await StatsCommand.create();
- await stats.run(options);
-}
diff --git a/src/cli/tutorial.ts b/src/cli/tutorial.ts
deleted file mode 100644
index 3b5936f2..00000000
--- a/src/cli/tutorial.ts
+++ /dev/null
@@ -1,560 +0,0 @@
-/**
- * Interactive Tutorial for MeMesh
- *
- * 5-minute guided experience to learn MeMesh commands and features
- * Phase C: Advanced CLI Features
- */
-
-import inquirer from 'inquirer';
-import chalk from 'chalk';
-import boxen from 'boxen';
-import { logger } from '../utils/logger.js';
-
-export interface TutorialProgress {
- currentStep: number;
- totalSteps: number;
- completedSteps: string[];
- startTime: Date;
-}
-
-export class InteractiveTutorial {
- private progress: TutorialProgress;
- private readonly TOTAL_STEPS = 7;
-
- constructor() {
- this.progress = {
- currentStep: 0,
- totalSteps: this.TOTAL_STEPS,
- completedSteps: [],
- startTime: new Date(),
- };
- }
-
- /**
- * Run the complete interactive tutorial
- */
- async run(): Promise {
- await this.showWelcome();
-
- try {
- await this.step1_Welcome();
- await this.step2_SetupVerification();
- await this.step3_FirstBuddyDo();
- await this.step4_MemoryStorage();
- await this.step5_MemoryRecall();
- await this.step6_AdvancedFeatures();
- await this.step7_NextSteps();
-
- await this.showCompletion();
- } catch (error) {
- await this.handleError(error);
- }
- }
-
- /**
- * Show welcome screen
- */
- private async showWelcome(): Promise {
- console.clear();
-
- const message = `
-${chalk.bold.cyan('🎓 Welcome to MeMesh Interactive Tutorial')}
-
-This 5-minute guided tour will teach you:
-
-${chalk.cyan('✓')} How to use buddy-do for task routing
-${chalk.cyan('✓')} How to store decisions with buddy-remember
-${chalk.cyan('✓')} How to recall past knowledge
-${chalk.cyan('✓')} Advanced features and workflows
-
-${chalk.bold('Estimated time:')} ~5 minutes
-${chalk.bold('Completion rate:')} 85% of users complete the tutorial
-
-${chalk.dim('Tip: Take your time and try each command yourself!')}
-`;
-
- console.log(boxen(message, {
- padding: 1,
- borderColor: 'cyan',
- borderStyle: 'round',
- }));
-
- const { ready } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'ready',
- message: 'Ready to start?',
- default: true,
- },
- ]);
-
- if (!ready) {
- console.log(chalk.yellow('\\nTutorial cancelled. Run `memesh tutorial` when ready!\\n'));
- process.exit(0);
- }
- }
-
- /**
- * Step 1: Welcome and orientation
- */
- private async step1_Welcome(): Promise {
- this.progress.currentStep = 1;
- console.clear();
-
- this.showStepHeader(1, 'Welcome & Overview');
-
- console.log(chalk.white(`
-MeMesh is your AI memory mesh for Claude Code. It helps you:
-
-${chalk.bold('1. Smart Task Routing')}
- Route complex tasks to specialized capabilities
-
-${chalk.bold('2. Persistent Memory')}
- Store and recall project decisions, patterns, and learnings
-
-${chalk.bold('3. Intelligent Management')}
- Organize knowledge for long-term projects
-
-Let's explore each of these features!
-`));
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('welcome');
- }
-
- /**
- * Step 2: Setup verification
- */
- private async step2_SetupVerification(): Promise {
- this.progress.currentStep = 2;
- console.clear();
-
- this.showStepHeader(2, 'Setup Verification');
-
- console.log(chalk.white(`
-Let's verify your MeMesh setup is working correctly.
-
-${chalk.bold('Try this command in Claude Code:')}
-`));
-
- console.log(chalk.cyan(' buddy-help'));
-
- console.log(chalk.white(`
-${chalk.bold('Expected output:')}
- • MeMesh Quick Start guide
- • List of essential commands
- • Documentation links
-
-${chalk.dim('This confirms MCP server is connected and MeMesh is ready.')}
-`));
-
- const { verified } = await inquirer.prompt([
- {
- type: 'list',
- name: 'verified',
- message: 'Did buddy-help work correctly?',
- choices: [
- { name: '✓ Yes, I see the help guide', value: true },
- { name: '✗ No, I got an error', value: false },
- ],
- },
- ]);
-
- if (!verified) {
- console.log(chalk.yellow(`
-${chalk.bold('Troubleshooting steps:')}
-
-1. Restart Claude Code completely
-2. Wait 10 seconds for MCP server to start
-3. Try buddy-help again
-4. If still failing, run: ${chalk.cyan('memesh config validate')}
-
-${chalk.dim('See: docs/TROUBLESHOOTING.md for detailed help')}
-`));
-
- const { retry } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'retry',
- message: 'Continue tutorial anyway?',
- default: true,
- },
- ]);
-
- if (!retry) {
- console.log(chalk.yellow('\\nTutorial paused. Run `memesh tutorial` to resume!\\n'));
- process.exit(0);
- }
- }
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('setup-verification');
- }
-
- /**
- * Step 3: First buddy-do command
- */
- private async step3_FirstBuddyDo(): Promise {
- this.progress.currentStep = 3;
- console.clear();
-
- this.showStepHeader(3, 'Your First buddy-do Command');
-
- console.log(chalk.white(`
-${chalk.bold('buddy-do')} routes your task to the best capability.
-
-${chalk.bold('Example task:')}
-`));
-
- console.log(chalk.cyan(' buddy-do "setup user authentication with JWT"'));
-
- console.log(chalk.white(`
-${chalk.bold('What happens:')}
- 1. ${chalk.green('✓')} Analyzes task complexity
- 2. ${chalk.green('✓')} Routes to ${chalk.cyan('backend-developer')} capability
- 3. ${chalk.green('✓')} Enhances prompt with context
- 4. ${chalk.green('✓')} Returns routing decision
-
-${chalk.bold("Now it's your turn!")}
-Try a buddy-do command in Claude Code with any task you like.
-
-${chalk.dim('Examples:')}
-${chalk.dim(' • "add login page"')}
-${chalk.dim(' • "refactor user service"')}
-${chalk.dim(' • "fix database connection bug"')}
-`));
-
- await inquirer.prompt([
- {
- type: 'input',
- name: 'task',
- message: 'What task did you try?',
- validate: (input) => input.trim().length > 0 || 'Please enter the task you tried',
- },
- ]);
-
- console.log(chalk.green(`
-✓ Great! You've learned how to use buddy-do for task execution.
-
-${chalk.bold('Key Takeaway:')}
-Use buddy-do whenever you have a development task that benefits from context-aware execution.
-`));
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('first-buddy-do');
- }
-
- /**
- * Step 4: Memory storage demo
- */
- private async step4_MemoryStorage(): Promise {
- this.progress.currentStep = 4;
- console.clear();
-
- this.showStepHeader(4, 'Storing Knowledge');
-
- console.log(chalk.white(`
-${chalk.bold('buddy-remember')} stores important information in your Knowledge Graph.
-
-${chalk.bold('When to store:')}
- • ${chalk.cyan('Decisions')} - Architecture choices, technology selections
- • ${chalk.cyan('Patterns')} - Coding standards, best practices
- • ${chalk.cyan('Lessons')} - Bug fixes, solutions that worked
-
-${chalk.bold('Example:')}
-`));
-
- console.log(chalk.cyan(` buddy-remember "We use JWT authentication because it's stateless and scales well"`));
-
- console.log(chalk.white(`
-${chalk.bold('Try it yourself!')}
-Store a decision or fact about your current project.
-
-${chalk.dim('Examples:')}
-${chalk.dim(' • "Using React for frontend because team is familiar"')}
-${chalk.dim(' • "Database schema uses UUID for primary keys"')}
-${chalk.dim(' • "API follows RESTful conventions"')}
-`));
-
- await inquirer.prompt([
- {
- type: 'input',
- name: 'memory',
- message: 'What did you store?',
- validate: (input) => input.trim().length > 0 || 'Please enter what you stored',
- },
- ]);
-
- console.log(chalk.green(`
-✓ Excellent! Your knowledge is now stored in the Knowledge Graph.
-
-${chalk.bold('Key Takeaway:')}
-Store important decisions and learnings as you work.
-Future you (and your team) will thank you!
-`));
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('memory-storage');
- }
-
- /**
- * Step 5: Memory recall demo
- */
- private async step5_MemoryRecall(): Promise {
- this.progress.currentStep = 5;
- console.clear();
-
- this.showStepHeader(5, 'Recalling Knowledge');
-
- console.log(chalk.white(`
-Now let's recall what you just stored!
-
-${chalk.bold('buddy-remember')} also searches your Knowledge Graph.
-
-${chalk.bold('Try searching for what you just stored:')}
-`));
-
- const { searchTerm } = await inquirer.prompt([
- {
- type: 'input',
- name: 'searchTerm',
- message: 'What keyword should we search for?',
- default: 'authentication',
- },
- ]);
-
- console.log(chalk.white(`
-${chalk.bold('Run this command in Claude Code:')}
-`));
-
- console.log(chalk.cyan(` buddy-remember "${searchTerm}"`));
-
- console.log(chalk.white(`
-${chalk.bold('Expected output:')}
- • List of related memories
- • Timestamps and context
- • Actionable next steps
-
-${chalk.dim('Tip: Use broader keywords for better search results')}
-`));
-
- const { found } = await inquirer.prompt([
- {
- type: 'confirm',
- name: 'found',
- message: 'Did you find the memory you stored?',
- default: true,
- },
- ]);
-
- if (found) {
- console.log(chalk.green(`
-✓ Perfect! You've completed the memory cycle: Store → Recall → Apply
-
-${chalk.bold('Key Takeaway:')}
-Search your Knowledge Graph before starting work.
-Past decisions and learnings can save you time!
-`));
- } else {
- console.log(chalk.yellow(`
-${chalk.bold('Troubleshooting tips:')}
- • Try a shorter, broader keyword
- • Check spelling
- • Memories might need time to index
-
-${chalk.dim('The memory is there - just try a different search term!')}
-`));
- }
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('memory-recall');
- }
-
- /**
- * Step 6: Advanced features preview
- */
- private async step6_AdvancedFeatures(): Promise {
- this.progress.currentStep = 6;
- console.clear();
-
- this.showStepHeader(6, 'Advanced Features');
-
- console.log(chalk.white(`
-You've mastered the basics! Here are some advanced features:
-
-${chalk.bold('1. Session Dashboard')} ${chalk.dim('(Coming Soon)')}
- ${chalk.cyan('memesh dashboard')}
- View real-time session health and metrics
-
-${chalk.bold('2. Usage Statistics')} ${chalk.dim('(Coming Soon)')}
- ${chalk.cyan('memesh stats')}
- Analyze your command history and patterns
-
-${chalk.bold('3. Configuration Management')}
- ${chalk.cyan('memesh config validate')}
- Test your MCP setup
-
-${chalk.bold('4. Get Full Help')}
- ${chalk.cyan('buddy-help --all')}
- See complete command reference
-
-${chalk.bold('5. Report Issues')}
- ${chalk.cyan('memesh report-issue')}
- Get support when you need it
-`));
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('advanced-features');
- }
-
- /**
- * Step 7: Next steps and resources
- */
- private async step7_NextSteps(): Promise {
- this.progress.currentStep = 7;
- console.clear();
-
- this.showStepHeader(7, "What's Next?");
-
- console.log(chalk.white(`
-Congratulations on completing the tutorial! 🎉
-
-${chalk.bold('Recommended Next Steps:')}
-
-${chalk.cyan('1. Try a real task')}
- Use buddy-do with an actual task from your project
-
-${chalk.cyan('2. Build your Knowledge Graph')}
- Store 3-5 important decisions or patterns
-
-${chalk.cyan('3. Explore the documentation')}
- ${chalk.dim('docs/QUICK_START.md')} - Quick reference
- ${chalk.dim('docs/USER_GUIDE.md')} - Complete guide
- ${chalk.dim('docs/BEST_PRACTICES.md')} - Workflows and tips
-
-${chalk.cyan('4. Join the community')}
- GitHub: https://github.com/PCIRCLE-AI/claude-code-buddy
- Issues: Report bugs or request features
- Discussions: Share your workflows
-
-${chalk.bold('Pro Tips:')}
- • Start each session by searching relevant memories
- • Store decisions as you make them, not later
- • Use buddy-do for tasks that need context
- • Keep memory descriptions concise and searchable
-`));
-
- await this.pressEnterToContinue();
- this.progress.completedSteps.push('next-steps');
- }
-
- /**
- * Show completion screen with certificate
- */
- private async showCompletion(): Promise {
- console.clear();
-
- const duration = Math.floor((new Date().getTime() - this.progress.startTime.getTime()) / 1000 / 60);
-
- const message = `
-${chalk.bold.green('🎉 Tutorial Complete!')}
-
-You've successfully completed the MeMesh Interactive Tutorial!
-
-${chalk.bold('What you learned:')}
- ${chalk.green('✓')} Smart task routing with buddy-do
- ${chalk.green('✓')} Storing knowledge with buddy-remember
- ${chalk.green('✓')} Recalling past decisions
- ${chalk.green('✓')} Advanced features and resources
-
-${chalk.bold('Time taken:')} ${duration} minutes
-${chalk.bold('Steps completed:')} ${this.progress.completedSteps.length}/${this.TOTAL_STEPS}
-
-${chalk.bold.cyan("You're now ready to use MeMesh like a pro!")}
-
-${chalk.dim('Run `memesh tutorial` anytime to review.')}
-`;
-
- console.log(boxen(message, {
- padding: 1,
- borderColor: 'green',
- borderStyle: 'round',
- }));
-
- logger.info('Tutorial completed', {
- duration,
- stepsCompleted: this.progress.completedSteps.length,
- totalSteps: this.TOTAL_STEPS,
- });
- }
-
- /**
- * Show step header with progress
- */
- private showStepHeader(step: number, title: string): void {
- const progressBar = this.createProgressBar(step, this.TOTAL_STEPS);
-
- console.log(chalk.cyan(`
-═══════════════════════════════════════════════════════════════════
- ${chalk.bold(`Step ${step}/${this.TOTAL_STEPS}: ${title}`)}
-═══════════════════════════════════════════════════════════════════
-`));
-
- console.log(progressBar + '\\n');
- }
-
- /**
- * Create ASCII progress bar
- */
- private createProgressBar(current: number, total: number): string {
- const width = 50;
- const filled = Math.floor((current / total) * width);
- const empty = width - filled;
-
- const bar = chalk.green('█'.repeat(filled)) + chalk.dim('░'.repeat(empty));
- const percentage = Math.floor((current / total) * 100);
-
- return `${bar} ${chalk.bold(`${percentage}%`)}`;
- }
-
- /**
- * Wait for user to press Enter
- */
- private async pressEnterToContinue(): Promise {
- await inquirer.prompt([
- {
- type: 'input',
- name: 'continue',
- message: chalk.dim('Press Enter to continue...'),
- },
- ]);
- }
-
- /**
- * Handle errors during tutorial
- */
- private async handleError(error: unknown): Promise {
- const errorMessage = error instanceof Error ? error.message : String(error);
-
- logger.error('Tutorial error', { error: errorMessage });
-
- console.log(chalk.red(`
-❌ Tutorial Error
-
-Something went wrong: ${errorMessage}
-
-You can:
- 1. Try again: memesh tutorial
- 2. Skip to docs: docs/QUICK_START.md
- 3. Get help: memesh report-issue
-`));
- }
-}
-
-/**
- * Run the interactive tutorial
- */
-export async function runTutorial(): Promise {
- const tutorial = new InteractiveTutorial();
- await tutorial.run();
-}
diff --git a/src/cli/view.ts b/src/cli/view.ts
new file mode 100644
index 00000000..8cd3e243
--- /dev/null
+++ b/src/cli/view.ts
@@ -0,0 +1,567 @@
+#!/usr/bin/env node
+
+import Database from 'better-sqlite3';
+import fs from 'fs';
+import path from 'path';
+import os from 'os';
+import { execFile } from 'child_process';
+import { fileURLToPath } from 'url';
+
+interface DashboardData {
+ entities: Array<{
+ id: number;
+ name: string;
+ type: string;
+ observations: string[];
+ tags: string[];
+ }>;
+ relations: Array<{
+ from: string;
+ to: string;
+ type: string;
+ }>;
+ stats: {
+ totalEntities: number;
+ totalObservations: number;
+ totalRelations: number;
+ totalTags: number;
+ typeDistribution: Record;
+ tagDistribution: Record;
+ };
+}
+
+function queryData(dbPath: string): DashboardData {
+ const emptyData: DashboardData = {
+ entities: [],
+ relations: [],
+ stats: {
+ totalEntities: 0,
+ totalObservations: 0,
+ totalRelations: 0,
+ totalTags: 0,
+ typeDistribution: {},
+ tagDistribution: {},
+ },
+ };
+
+ if (!fs.existsSync(dbPath)) {
+ return emptyData;
+ }
+
+ let db: Database.Database;
+ try {
+ db = new Database(dbPath, { readonly: true });
+ } catch (err: any) {
+ console.error(`[memesh-view] Cannot open database at ${dbPath}: ${err.message}`);
+ return emptyData;
+ }
+
+ try {
+ // Check if tables exist
+ const tables = db
+ .prepare(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('entities', 'observations', 'relations', 'tags')"
+ )
+ .all()
+ .map((r: any) => r.name as string);
+
+ if (!tables.includes('entities')) {
+ return emptyData;
+ }
+
+ // Query entities
+ const entityRows = db
+ .prepare('SELECT id, name, type FROM entities LIMIT 5000')
+ .all() as Array<{ id: number; name: string; type: string }>;
+
+ // Query observations
+ const obsRows = tables.includes('observations')
+ ? (db
+ .prepare('SELECT entity_id, content FROM observations')
+ .all() as Array<{ entity_id: number; content: string }>)
+ : [];
+
+ // Query tags
+ const tagRows = tables.includes('tags')
+ ? (db.prepare('SELECT entity_id, tag FROM tags').all() as Array<{
+ entity_id: number;
+ tag: string;
+ }>)
+ : [];
+
+ // Query relations
+ const relationRows = tables.includes('relations')
+ ? (db
+ .prepare(
+ 'SELECT from_entity_id, to_entity_id, relation_type FROM relations'
+ )
+ .all() as Array<{
+ from_entity_id: number;
+ to_entity_id: number;
+ relation_type: string;
+ }>)
+ : [];
+
+ // Build entity map
+ const entityMap = new Map();
+ for (const e of entityRows) {
+ entityMap.set(e.id, e.name);
+ }
+
+ // Group observations by entity
+ const obsByEntity = new Map();
+ for (const o of obsRows) {
+ const arr = obsByEntity.get(o.entity_id) ?? [];
+ arr.push(o.content);
+ obsByEntity.set(o.entity_id, arr);
+ }
+
+ // Group tags by entity
+ const tagsByEntity = new Map();
+ for (const t of tagRows) {
+ const arr = tagsByEntity.get(t.entity_id) ?? [];
+ arr.push(t.tag);
+ tagsByEntity.set(t.entity_id, arr);
+ }
+
+ // Build entities
+ const entities = entityRows.map((e) => ({
+ id: e.id,
+ name: e.name,
+ type: e.type,
+ observations: obsByEntity.get(e.id) ?? [],
+ tags: tagsByEntity.get(e.id) ?? [],
+ }));
+
+ // Build relations with names
+ const relations = relationRows
+ .filter(
+ (r) => entityMap.has(r.from_entity_id) && entityMap.has(r.to_entity_id)
+ )
+ .map((r) => ({
+ from: entityMap.get(r.from_entity_id)!,
+ to: entityMap.get(r.to_entity_id)!,
+ type: r.relation_type,
+ }));
+
+ // Type distribution
+ const typeDistribution: Record = {};
+ for (const e of entityRows) {
+ typeDistribution[e.type] = (typeDistribution[e.type] ?? 0) + 1;
+ }
+
+ // Tag distribution
+ const tagDistribution: Record = {};
+ for (const t of tagRows) {
+ tagDistribution[t.tag] = (tagDistribution[t.tag] ?? 0) + 1;
+ }
+
+ return {
+ entities,
+ relations,
+ stats: {
+ totalEntities: entityRows.length,
+ totalObservations: obsRows.length,
+ totalRelations: relationRows.length,
+ totalTags: tagRows.length,
+ typeDistribution,
+ tagDistribution,
+ },
+ };
+ } finally {
+ db.close();
+ }
+}
+
+/**
+ * Escape characters that could break out of a script JSON context.
+ */
+function escapeJsonForHtml(json: string): string {
+ return json
+ .replace(/&/g, '\\u0026')
+ .replace(//g, '\\u003e')
+ .replace(/\u2028/g, '\\u2028')
+ .replace(/\u2029/g, '\\u2029');
+}
+
+/**
+ * Generate a self-contained HTML dashboard for the MeMesh knowledge graph.
+ * Opens the database read-only and queries all data.
+ */
+export function generateDashboardHtml(dbPath?: string): string {
+ const resolvedPath =
+ dbPath ??
+ process.env.MEMESH_DB_PATH ??
+ path.join(os.homedir(), '.memesh', 'knowledge-graph.db');
+
+ const data = queryData(resolvedPath);
+ const dataJson = escapeJsonForHtml(JSON.stringify(data));
+
+ return `
+
+
+
+
+ MeMesh Dashboard
+
+ ',
+ 'malicious'
+ );
+ db.close();
+
+ const html = generateDashboardHtml(testDbPath);
+
+ expect(html).not.toContain('');
+ expect(html).toContain('\\u003c');
+ });
+ });
+
+ describe('Scenario: D3.js graph visualization', () => {
+ it('Given the generated HTML, Then it includes D3.js CDN link', () => {
+ const db = new Database(testDbPath);
+ db.exec(SCHEMA_SQL);
+ db.close();
+
+ const html = generateDashboardHtml(testDbPath);
+
+ expect(html).toContain('d3js.org');
+ expect(html).toContain('d3.forceSimulation');
+ });
+
+ it('Given entities with observations, Then node size calculation is included', () => {
+ const db = new Database(testDbPath);
+ db.exec(SCHEMA_SQL);
+ db.prepare('INSERT INTO entities (name, type) VALUES (?, ?)').run(
+ 'Test',
+ 'thing'
+ );
+ db.close();
+
+ const html = generateDashboardHtml(testDbPath);
+
+ expect(html).toContain('Math.min');
+ });
+ });
+});
diff --git a/tests/core/AgentRegistry.test.ts b/tests/core/AgentRegistry.test.ts
deleted file mode 100644
index 2dfe7fc6..00000000
--- a/tests/core/AgentRegistry.test.ts
+++ /dev/null
@@ -1,133 +0,0 @@
-import { describe, it, expect, beforeEach } from 'vitest';
-import { AgentRegistry } from '../../src/core/AgentRegistry.js';
-import { AgentClassification } from '../../src/types/AgentClassification.js';
-
-describe('AgentRegistry - Agent Classification', () => {
- let registry: AgentRegistry;
-
- beforeEach(() => {
- registry = new AgentRegistry();
- });
-
- describe('Agent Classification System', () => {
- it('should classify agents by implementation type', () => {
- const realImplementations = registry.getRealImplementations();
- const enhancedPrompts = registry.getEnhancedPrompts();
- const optionalAgents = registry.getOptionalAgents();
-
- expect(realImplementations.length).toBeGreaterThan(0);
- expect(enhancedPrompts.length).toBeGreaterThan(0);
- expect(optionalAgents).toHaveLength(0);
- });
-
- it('should return correct agent types for real implementations', () => {
- const realImplementations = registry.getRealImplementations();
- const realNames = realImplementations.map(a => a.name);
-
- expect(realNames).toContain('development-butler');
- expect(realNames).toContain('test-writer');
- expect(realNames).toContain('e2e-healing-agent');
- expect(realNames).toContain('project-manager');
- expect(realNames).toContain('data-engineer');
- });
-
- it('should return correct agent types for enhanced prompts', () => {
- const enhancedPrompts = registry.getEnhancedPrompts();
- const enhancedNames = enhancedPrompts.map(a => a.name);
-
- // Original 7 agents
- expect(enhancedNames).toContain('architecture-agent');
- expect(enhancedNames).toContain('code-reviewer');
- expect(enhancedNames).toContain('security-auditor');
- expect(enhancedNames).toContain('ui-designer');
- expect(enhancedNames).toContain('marketing-strategist');
- expect(enhancedNames).toContain('product-manager');
- expect(enhancedNames).toContain('ml-engineer');
-
- // New 5 agents
- expect(enhancedNames).toContain('debugger');
- expect(enhancedNames).toContain('refactorer');
- expect(enhancedNames).toContain('api-designer');
- expect(enhancedNames).toContain('research-agent');
- expect(enhancedNames).toContain('data-analyst');
- });
-
- it('should have no optional agents by default', () => {
- const optionalAgents = registry.getOptionalAgents();
-
- expect(optionalAgents).toHaveLength(0);
- });
- });
-
- describe('Agent Metadata with Classification', () => {
- it('should have classification field in metadata', () => {
- const devButler = registry.getAgent('development-butler');
-
- expect(devButler).toBeDefined();
- expect(devButler?.classification).toBe(AgentClassification.REAL_IMPLEMENTATION);
- });
-
- it('should have mcpTools field in metadata', () => {
- const devButler = registry.getAgent('development-butler');
-
- expect(devButler).toBeDefined();
- expect(devButler?.mcpTools).toBeDefined();
- expect(Array.isArray(devButler?.mcpTools)).toBe(true);
- expect(devButler?.mcpTools).toContain('filesystem');
- expect(devButler?.mcpTools).toContain('memory');
- expect(devButler?.mcpTools).toContain('bash');
- });
-
- it('should not have requiredDependencies for non-optional agents', () => {
- const codeReviewer = registry.getAgent('code-reviewer');
-
- expect(codeReviewer).toBeDefined();
- expect(codeReviewer?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- expect(codeReviewer?.requiredDependencies).toBeUndefined();
- });
- });
-
- describe('getAllAgents should return all agents', () => {
- it('should match sum of classifications', () => {
- const allAgents = registry.getAllAgents();
- const real = registry.getRealImplementations();
- const enhanced = registry.getEnhancedPrompts();
- const optional = registry.getOptionalAgents();
-
- expect(allAgents).toHaveLength(real.length + enhanced.length + optional.length);
- });
- });
-
- describe('Missing Enhanced Prompt Agents', () => {
- it('should include all 5 missing Enhanced Prompt agents', () => {
- const enhancedPrompts = registry.getEnhancedPrompts();
- const enhancedNames = enhancedPrompts.map(a => a.name);
-
- // The 5 missing agents from the plan
- expect(enhancedNames).toContain('debugger');
- expect(enhancedNames).toContain('refactorer');
- expect(enhancedNames).toContain('api-designer');
- expect(enhancedNames).toContain('research-agent');
- expect(enhancedNames).toContain('data-analyst');
- });
-
- it('should have correct classification for missing agents', () => {
- const debuggerAgent = registry.getAgent('debugger');
- const refactorer = registry.getAgent('refactorer');
- const apiDesigner = registry.getAgent('api-designer');
- const researchAgent = registry.getAgent('research-agent');
- const dataAnalyst = registry.getAgent('data-analyst');
-
- expect(debuggerAgent?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- expect(refactorer?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- expect(apiDesigner?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- expect(researchAgent?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- expect(dataAnalyst?.classification).toBe(AgentClassification.ENHANCED_PROMPT);
- });
-
- it('should include a stable set of enhanced prompt agents', () => {
- const enhancedPrompts = registry.getEnhancedPrompts();
- expect(enhancedPrompts.length).toBeGreaterThan(0);
- });
- });
-});
diff --git a/tests/core/CheckpointDetector.test.ts b/tests/core/CheckpointDetector.test.ts
deleted file mode 100644
index 8764db1c..00000000
--- a/tests/core/CheckpointDetector.test.ts
+++ /dev/null
@@ -1,152 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { CheckpointDetector } from '../../src/core/CheckpointDetector.js';
-
-describe('CheckpointDetector', () => {
- let detector: CheckpointDetector;
-
- beforeEach(() => {
- detector = new CheckpointDetector();
- });
-
- describe('Checkpoint Registration', () => {
- it('should register a checkpoint with callback', () => {
- const callback = vi.fn();
- const result = detector.registerCheckpoint('test-complete', callback);
-
- expect(result).toBe(true);
- expect(detector.isCheckpointRegistered('test-complete')).toBe(true);
- });
-
- it('should check if a checkpoint is registered', () => {
- expect(detector.isCheckpointRegistered('nonexistent')).toBe(false);
-
- const callback = vi.fn();
- detector.registerCheckpoint('code-written', callback);
-
- expect(detector.isCheckpointRegistered('code-written')).toBe(true);
- });
-
- it('should get list of all registered checkpoints', () => {
- const callback1 = vi.fn();
- const callback2 = vi.fn();
-
- detector.registerCheckpoint('test-complete', callback1);
- detector.registerCheckpoint('code-written', callback2);
-
- const checkpoints = detector.getRegisteredCheckpoints();
-
- expect(checkpoints).toHaveLength(2);
- expect(checkpoints).toContain('test-complete');
- expect(checkpoints).toContain('code-written');
- });
- });
-
- describe('Checkpoint Triggering', () => {
- it('should trigger a registered checkpoint callback', async () => {
- const callback = vi.fn().mockResolvedValue({ success: true });
- detector.registerCheckpoint('test-complete', callback);
-
- const result = await detector.triggerCheckpoint('test-complete', {
- testResults: { passed: 10, failed: 0 },
- });
-
- expect(callback).toHaveBeenCalledWith({
- testResults: { passed: 10, failed: 0 },
- });
- expect(result.triggered).toBe(true);
- expect(result.checkpointName).toBe('test-complete');
- });
-
- it('should throw error when triggering unregistered checkpoint', async () => {
- await expect(
- detector.triggerCheckpoint('unregistered', {})
- ).rejects.toThrow('Checkpoint "unregistered" is not registered');
- });
-
- it('should handle callback errors gracefully', async () => {
- const callback = vi.fn().mockRejectedValue(new Error('Callback failed'));
- detector.registerCheckpoint('error-checkpoint', callback);
-
- const result = await detector.triggerCheckpoint('error-checkpoint', {});
-
- expect(result.triggered).toBe(false);
- expect(result.error).toBe('Callback failed');
- });
- });
-
- describe('Multiple Callbacks per Checkpoint', () => {
- it('should support multiple callbacks for same checkpoint', async () => {
- const callback1 = vi.fn().mockResolvedValue({ success: true });
- const callback2 = vi.fn().mockResolvedValue({ success: true });
-
- detector.registerCheckpoint('test-complete', callback1);
- detector.addCallback('test-complete', callback2);
-
- await detector.triggerCheckpoint('test-complete', { data: 'test' });
-
- expect(callback1).toHaveBeenCalledWith({ data: 'test' });
- expect(callback2).toHaveBeenCalledWith({ data: 'test' });
- });
-
- it('should execute all callbacks even if one fails', async () => {
- const callback1 = vi.fn().mockRejectedValue(new Error('Failed'));
- const callback2 = vi.fn().mockResolvedValue({ success: true });
-
- detector.registerCheckpoint('test-checkpoint', callback1);
- detector.addCallback('test-checkpoint', callback2);
-
- const result = await detector.triggerCheckpoint('test-checkpoint', {});
-
- expect(callback1).toHaveBeenCalled();
- expect(callback2).toHaveBeenCalled();
- expect(result.triggered).toBe(true);
- expect(result.failedCallbacks).toBe(1);
- });
- });
-
- describe('Checkpoint Unregistration', () => {
- it('should unregister a checkpoint', () => {
- const callback = vi.fn();
- detector.registerCheckpoint('temp-checkpoint', callback);
-
- expect(detector.isCheckpointRegistered('temp-checkpoint')).toBe(true);
-
- const result = detector.unregisterCheckpoint('temp-checkpoint');
-
- expect(result).toBe(true);
- expect(detector.isCheckpointRegistered('temp-checkpoint')).toBe(false);
- });
-
- it('should return false when unregistering nonexistent checkpoint', () => {
- const result = detector.unregisterCheckpoint('nonexistent');
-
- expect(result).toBe(false);
- });
- });
-
- describe('Checkpoint Metadata', () => {
- it('should store and retrieve checkpoint metadata', () => {
- const callback = vi.fn();
- const metadata = {
- description: 'Triggered when tests complete',
- priority: 'high',
- category: 'testing',
- };
-
- detector.registerCheckpoint('test-complete', callback, metadata);
-
- const retrieved = detector.getCheckpointMetadata('test-complete');
-
- expect(retrieved).toBeDefined();
- expect(retrieved?.description).toBe('Triggered when tests complete');
- expect(retrieved?.priority).toBe('high');
- expect(retrieved?.category).toBe('testing');
- });
-
- it('should return undefined for unregistered checkpoint metadata', () => {
- const result = detector.getCheckpointMetadata('nonexistent');
-
- expect(result).toBeUndefined();
- });
- });
-});
diff --git a/tests/core/MCPToolInterface.test.ts b/tests/core/MCPToolInterface.test.ts
deleted file mode 100644
index 83b4ed9c..00000000
--- a/tests/core/MCPToolInterface.test.ts
+++ /dev/null
@@ -1,137 +0,0 @@
-import { describe, it, expect, beforeEach } from 'vitest';
-import { MCPToolInterface } from '../../src/core/MCPToolInterface.js';
-
-describe('MCPToolInterface', () => {
- let toolInterface: MCPToolInterface;
-
- beforeEach(() => {
- toolInterface = new MCPToolInterface();
- });
-
- describe('Tool Registration', () => {
- it('should register an MCP tool', () => {
- const result = toolInterface.registerTool('filesystem', {
- description: 'File system operations',
- methods: ['read', 'write', 'delete'],
- });
-
- expect(result).toBe(true);
- expect(toolInterface.isToolRegistered('filesystem')).toBe(true);
- });
-
- it('should check if a tool is registered', () => {
- expect(toolInterface.isToolRegistered('nonexistent')).toBe(false);
-
- toolInterface.registerTool('memory', {
- description: 'Memory operations',
- methods: ['store', 'retrieve'],
- });
-
- expect(toolInterface.isToolRegistered('memory')).toBe(true);
- });
-
- it('should get list of all registered tools', () => {
- toolInterface.registerTool('filesystem', {
- description: 'File system operations',
- methods: ['read', 'write'],
- });
- toolInterface.registerTool('memory', {
- description: 'Memory operations',
- methods: ['store', 'retrieve'],
- });
-
- const tools = toolInterface.getRegisteredTools();
-
- expect(tools).toHaveLength(2);
- expect(tools).toContain('filesystem');
- expect(tools).toContain('memory');
- });
- });
-
- describe('Tool Invocation', () => {
- beforeEach(() => {
- toolInterface.registerTool('filesystem', {
- description: 'File system operations',
- methods: ['read', 'write'],
- });
- });
-
- it('should throw error when external tool invocation is not configured', async () => {
- await expect(
- toolInterface.invokeTool('filesystem', 'read', {
- path: '/test/file.txt',
- })
- ).rejects.toThrow('External MCP tool invocation is not configured');
- });
-
- it('should throw error when invoking unregistered tool', async () => {
- await expect(
- toolInterface.invokeTool('unregistered', 'someMethod', {})
- ).rejects.toThrow('Tool "unregistered" is not registered');
- });
-
- it('should validate tool method exists', async () => {
- await expect(
- toolInterface.invokeTool('filesystem', 'invalidMethod', {})
- ).rejects.toThrow('Method "invalidMethod" not available for tool "filesystem"');
- });
- });
-
- describe('Tool Dependencies', () => {
- it('should check if required tools are available', () => {
- toolInterface.registerTool('filesystem', {
- description: 'File system operations',
- methods: ['read', 'write'],
- });
- toolInterface.registerTool('memory', {
- description: 'Memory operations',
- methods: ['store', 'retrieve'],
- });
-
- const required = ['filesystem', 'memory'];
- const result = toolInterface.checkRequiredTools(required);
-
- expect(result.allAvailable).toBe(true);
- expect(result.missing).toHaveLength(0);
- });
-
- it('should identify missing required tools', () => {
- toolInterface.registerTool('filesystem', {
- description: 'File system operations',
- methods: ['read', 'write'],
- });
-
- const required = ['filesystem', 'memory', 'bash'];
- const result = toolInterface.checkRequiredTools(required);
-
- expect(result.allAvailable).toBe(false);
- expect(result.missing).toHaveLength(2);
- expect(result.missing).toContain('memory');
- expect(result.missing).toContain('bash');
- });
- });
-
- describe('Tool Metadata', () => {
- it('should get tool metadata', () => {
- const metadata = {
- description: 'File system operations',
- methods: ['read', 'write', 'delete'],
- };
-
- toolInterface.registerTool('filesystem', metadata);
-
- const result = toolInterface.getToolMetadata('filesystem');
-
- expect(result).toBeDefined();
- expect(result?.description).toBe('File system operations');
- expect(result?.methods).toHaveLength(3);
- expect(result?.methods).toContain('read');
- });
-
- it('should return undefined for unregistered tool', () => {
- const result = toolInterface.getToolMetadata('nonexistent');
-
- expect(result).toBeUndefined();
- });
- });
-});
diff --git a/tests/db.test.ts b/tests/db.test.ts
new file mode 100644
index 00000000..f58cb748
--- /dev/null
+++ b/tests/db.test.ts
@@ -0,0 +1,113 @@
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import { openDatabase, closeDatabase, getDatabase } from '../src/db.js';
+import fs from 'fs';
+import path from 'path';
+import os from 'os';
+
+describe('Feature: Database Management', () => {
+ let testDir: string;
+ let testDbPath: string;
+
+ beforeEach(() => {
+ testDir = path.join(os.tmpdir(), `memesh-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
+ fs.mkdirSync(testDir, { recursive: true });
+ testDbPath = path.join(testDir, 'test.db');
+ });
+
+ afterEach(() => {
+ try { closeDatabase(); } catch {}
+ fs.rmSync(testDir, { recursive: true, force: true });
+ });
+
+ describe('Scenario: Open database for first time', () => {
+ it('Given no database exists, When I open, Then it creates all tables', () => {
+ const db = openDatabase(testDbPath);
+ const tables = db.prepare(
+ "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
+ ).all().map((r: any) => r.name);
+ expect(tables).toContain('entities');
+ expect(tables).toContain('observations');
+ expect(tables).toContain('relations');
+ expect(tables).toContain('tags');
+ });
+
+ it('Given no database exists, When I open, Then FTS5 virtual table exists', () => {
+ const db = openDatabase(testDbPath);
+ const tables = db.prepare(
+ "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
+ ).all().map((r: any) => r.name);
+ expect(tables).toContain('entities_fts');
+ });
+
+ it('Given no database exists, When I open, Then WAL mode is enabled', () => {
+ const db = openDatabase(testDbPath);
+ const mode = db.prepare('PRAGMA journal_mode').get() as any;
+ expect(mode.journal_mode).toBe('wal');
+ });
+
+ it('Given no database exists, When I open, Then foreign keys are enabled', () => {
+ const db = openDatabase(testDbPath);
+ const fk = db.prepare('PRAGMA foreign_keys').get() as any;
+ expect(fk.foreign_keys).toBe(1);
+ });
+ });
+
+ describe('Scenario: Open existing database', () => {
+ it('Given db already open, When I call openDatabase again, Then returns same instance', () => {
+ const db1 = openDatabase(testDbPath);
+ const db2 = openDatabase(testDbPath);
+ expect(db1).toBe(db2);
+ });
+ });
+
+ describe('Scenario: Close database', () => {
+ it('Given open db, When I close, Then getDatabase throws', () => {
+ openDatabase(testDbPath);
+ closeDatabase();
+ expect(() => getDatabase()).toThrow('Database not opened');
+ });
+
+ it('Given no db open, When I close, Then no error', () => {
+ expect(() => closeDatabase()).not.toThrow();
+ });
+ });
+
+ describe('Scenario: getDatabase', () => {
+ it('Given db is open, When I call getDatabase, Then returns connection', () => {
+ openDatabase(testDbPath);
+ const db = getDatabase();
+ expect(db).toBeDefined();
+ const result = db.prepare('SELECT 1 as val').get() as any;
+ expect(result.val).toBe(1);
+ });
+ });
+
+ describe('Scenario: Database path from env', () => {
+ it('Given MEMESH_DB_PATH is set, When I open, Then uses that path', () => {
+ const customPath = path.join(testDir, 'custom.db');
+ const origEnv = process.env.MEMESH_DB_PATH;
+ process.env.MEMESH_DB_PATH = customPath;
+ try {
+ openDatabase();
+ expect(fs.existsSync(customPath)).toBe(true);
+ } finally {
+ closeDatabase();
+ process.env.MEMESH_DB_PATH = origEnv;
+ }
+ });
+ });
+
+ describe('Scenario: Indexes exist', () => {
+ it('Given db is open, Then indexes on tags, observations, relations exist', () => {
+ const db = openDatabase(testDbPath);
+ const indexes = db.prepare(
+ "SELECT name FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%' ORDER BY name"
+ ).all().map((r: any) => r.name);
+ expect(indexes).toContain('idx_tags_entity');
+ expect(indexes).toContain('idx_tags_tag');
+ expect(indexes).toContain('idx_observations_entity');
+ expect(indexes).toContain('idx_relations_from');
+ expect(indexes).toContain('idx_relations_to');
+ });
+ });
+});
diff --git a/tests/db/QueryCache.benchmark.ts b/tests/db/QueryCache.benchmark.ts
deleted file mode 100644
index 816e6cd1..00000000
--- a/tests/db/QueryCache.benchmark.ts
+++ /dev/null
@@ -1,247 +0,0 @@
-/**
- * Query Cache Performance Benchmark
- *
- * Compares performance with and without caching to demonstrate the benefits
- * of the QueryCache implementation.
- */
-
-import { describe, it, beforeAll, afterAll } from 'vitest';
-import { KnowledgeGraph } from '../../src/knowledge-graph/index.js';
-import { SimpleDatabaseFactory } from '../../src/config/simple-config.js';
-import { join } from 'path';
-import { mkdirSync, rmSync, existsSync } from 'fs';
-
-describe('Query Cache Performance Benchmark', () => {
- const benchmarkDir = join(process.cwd(), 'data', 'benchmark');
- const dbPath = join(benchmarkDir, 'benchmark.db');
- let kg: KnowledgeGraph;
-
- beforeAll(async () => {
- // Create benchmark directory
- if (!existsSync(benchmarkDir)) {
- mkdirSync(benchmarkDir, { recursive: true });
- }
-
- // Create knowledge graph
- kg = await KnowledgeGraph.create(dbPath);
-
- // Seed with test data (100 entities, various types)
- console.log('\n📊 Seeding database with test data...');
- const entityTypes = ['agent', 'concept', 'technology', 'project', 'skill'];
-
- for (let i = 0; i < 100; i++) {
- const type = entityTypes[i % entityTypes.length];
- kg.createEntity({
- name: `entity-${type}-${i}`,
- type,
- observations: [
- `This is observation 1 for entity ${i}`,
- `This is observation 2 for entity ${i}`,
- `This is observation 3 for entity ${i}`,
- ],
- tags: [`tag-${i % 10}`, `category-${type}`],
- metadata: { index: i, benchmark: true },
- });
- }
-
- // Create relations (50 relations)
- for (let i = 0; i < 50; i++) {
- const from = i;
- const to = (i + 1) % 100;
- kg.createRelation({
- from: `entity-agent-${from}`,
- to: `entity-agent-${to}`,
- relationType: 'related_to',
- metadata: { strength: Math.random() },
- });
- }
-
- console.log('✅ Database seeded with 100 entities and 50 relations\n');
- });
-
- afterAll(async () => {
- // Cleanup
- kg.close();
- await SimpleDatabaseFactory.close(dbPath);
-
- // Remove benchmark directory
- if (existsSync(benchmarkDir)) {
- rmSync(benchmarkDir, { recursive: true, force: true });
- }
- });
-
- it('Benchmark: searchEntities() - repeated queries', () => {
- const iterations = 1000;
- const query = { type: 'agent', limit: 10 };
-
- // Clear cache before benchmark
- kg.clearCache();
-
- // First run - cold cache
- console.log('\n🔥 Cold cache (first run):');
- const coldStart = Date.now();
- kg.searchEntities(query);
- const coldDuration = Date.now() - coldStart;
- console.log(` First query: ${coldDuration}ms`);
-
- // Subsequent runs - warm cache
- console.log('\n♨️ Warm cache (subsequent runs):');
- const warmStart = Date.now();
- for (let i = 0; i < iterations; i++) {
- kg.searchEntities(query);
- }
- const warmDuration = Date.now() - warmStart;
- const avgWarmDuration = warmDuration / iterations;
-
- console.log(` ${iterations} queries: ${warmDuration}ms`);
- console.log(` Average per query: ${avgWarmDuration.toFixed(3)}ms`);
-
- // Calculate speedup
- const speedup = coldDuration / avgWarmDuration;
- console.log(`\n⚡ Speedup: ${speedup.toFixed(0)}x faster with cache`);
-
- // Get cache stats
- const stats = kg.getCacheStats();
- console.log(`\n📈 Cache Statistics:`);
- console.log(` Hits: ${stats.hits}`);
- console.log(` Misses: ${stats.misses}`);
- console.log(` Hit Rate: ${stats.hitRate}%`);
- console.log(` Size: ${stats.size} / ${stats.maxSize}`);
- console.log(` Memory Usage: ${(stats.memoryUsage / 1024).toFixed(2)} KB`);
- });
-
- it('Benchmark: traceRelations() - relationship traversal', () => {
- const iterations = 500;
- const entityName = 'entity-agent-0';
-
- // Clear cache before benchmark
- kg.clearCache();
-
- // Cold cache
- console.log('\n🔥 Cold cache (relationship traversal):');
- const coldStart = Date.now();
- kg.traceRelations(entityName);
- const coldDuration = Date.now() - coldStart;
- console.log(` First query: ${coldDuration}ms`);
-
- // Warm cache
- console.log('\n♨️ Warm cache:');
- const warmStart = Date.now();
- for (let i = 0; i < iterations; i++) {
- kg.traceRelations(entityName);
- }
- const warmDuration = Date.now() - warmStart;
- const avgWarmDuration = warmDuration / iterations;
-
- console.log(` ${iterations} queries: ${warmDuration}ms`);
- console.log(` Average per query: ${avgWarmDuration.toFixed(3)}ms`);
-
- const speedup = coldDuration / avgWarmDuration;
- console.log(`\n⚡ Speedup: ${speedup.toFixed(0)}x faster with cache`);
- });
-
- it('Benchmark: getStats() - statistics queries', () => {
- const iterations = 1000;
-
- // Clear cache before benchmark
- kg.clearCache();
-
- // Cold cache
- console.log('\n🔥 Cold cache (statistics):');
- const coldStart = Date.now();
- kg.getStats();
- const coldDuration = Date.now() - coldStart;
- console.log(` First query: ${coldDuration}ms`);
-
- // Warm cache
- console.log('\n♨️ Warm cache:');
- const warmStart = Date.now();
- for (let i = 0; i < iterations; i++) {
- kg.getStats();
- }
- const warmDuration = Date.now() - warmStart;
- const avgWarmDuration = warmDuration / iterations;
-
- console.log(` ${iterations} queries: ${warmDuration}ms`);
- console.log(` Average per query: ${avgWarmDuration.toFixed(3)}ms`);
-
- const speedup = coldDuration / avgWarmDuration;
- console.log(`\n⚡ Speedup: ${speedup.toFixed(0)}x faster with cache`);
- });
-
- it('Benchmark: Mixed workload - realistic usage pattern', () => {
- const iterations = 100;
-
- // Clear cache before benchmark
- kg.clearCache();
-
- console.log('\n🔄 Mixed workload benchmark:');
- console.log(' Pattern: 50% search, 30% trace, 20% stats');
-
- const start = Date.now();
-
- for (let i = 0; i < iterations; i++) {
- const rand = Math.random();
-
- if (rand < 0.5) {
- // 50% search queries
- kg.searchEntities({
- type: i % 2 === 0 ? 'agent' : 'concept',
- limit: 10,
- });
- } else if (rand < 0.8) {
- // 30% relation traces
- kg.traceRelations(`entity-agent-${i % 20}`);
- } else {
- // 20% statistics
- kg.getStats();
- }
- }
-
- const duration = Date.now() - start;
- const avgDuration = duration / iterations;
-
- console.log(` ${iterations} operations: ${duration}ms`);
- console.log(` Average per operation: ${avgDuration.toFixed(3)}ms`);
-
- const stats = kg.getCacheStats();
- console.log(`\n📈 Final Cache Statistics:`);
- console.log(` Hit Rate: ${stats.hitRate}%`);
- console.log(` Cache Entries: ${stats.size}`);
- console.log(` Memory Usage: ${(stats.memoryUsage / 1024).toFixed(2)} KB`);
- });
-
- it('Benchmark: Cache invalidation overhead', () => {
- const iterations = 100;
-
- // Populate cache
- for (let i = 0; i < 10; i++) {
- kg.searchEntities({ type: 'agent', limit: 10 });
- kg.searchEntities({ type: 'concept', limit: 10 });
- kg.traceRelations(`entity-agent-${i}`);
- }
-
- console.log('\n🗑️ Cache invalidation benchmark:');
-
- const start = Date.now();
-
- for (let i = 0; i < iterations; i++) {
- // Create entity (triggers cache invalidation)
- kg.createEntity({
- name: `temp-entity-${i}`,
- type: 'temp',
- observations: ['test'],
- });
-
- // Re-query (cache miss)
- kg.searchEntities({ type: 'agent', limit: 10 });
- }
-
- const duration = Date.now() - start;
- const avgDuration = duration / iterations;
-
- console.log(` ${iterations} create + query cycles: ${duration}ms`);
- console.log(` Average per cycle: ${avgDuration.toFixed(3)}ms`);
- console.log(' ✅ Invalidation overhead is minimal');
- });
-});
diff --git a/tests/db/QueryCache.test.ts b/tests/db/QueryCache.test.ts
deleted file mode 100644
index ef2f81f2..00000000
--- a/tests/db/QueryCache.test.ts
+++ /dev/null
@@ -1,507 +0,0 @@
-/**
- * QueryCache Tests
- *
- * Comprehensive test suite for the QueryCache implementation covering:
- * - Basic get/set operations
- * - LRU eviction
- * - TTL expiration
- * - Pattern invalidation
- * - Statistics tracking
- * - Memory management
- * - Performance characteristics
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { QueryCache, DatabaseQueryCache } from '../../src/db/QueryCache.js';
-
-describe('QueryCache', () => {
- let cache: QueryCache;
-
- beforeEach(() => {
- cache = new QueryCache({
- maxSize: 5,
- defaultTTL: 1000, // 1 second for testing
- debug: false,
- });
- });
-
- afterEach(() => {
- cache.destroy();
- });
-
- describe('Basic Operations', () => {
- it('should set and get values', () => {
- cache.set('key1', { data: 'value1' });
- const result = cache.get('key1');
-
- expect(result).toEqual({ data: 'value1' });
- });
-
- it('should return undefined for non-existent keys', () => {
- const result = cache.get('nonexistent');
- expect(result).toBeUndefined();
- });
-
- it('should delete specific entries', () => {
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
-
- expect(cache.delete('key1')).toBe(true);
- expect(cache.get('key1')).toBeUndefined();
- expect(cache.get('key2')).toBe('value2');
- });
-
- it('should clear all entries', () => {
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
- cache.set('key3', 'value3');
-
- cache.clear();
-
- expect(cache.size).toBe(0);
- expect(cache.get('key1')).toBeUndefined();
- });
-
- it('should check if key exists', () => {
- cache.set('key1', 'value1');
-
- expect(cache.has('key1')).toBe(true);
- expect(cache.has('key2')).toBe(false);
- });
- });
-
- describe('LRU Eviction', () => {
- it('should evict least recently used entry when full', () => {
- // Fill cache to max size (5 entries)
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
- cache.set('key3', 'value3');
- cache.set('key4', 'value4');
- cache.set('key5', 'value5');
-
- expect(cache.size).toBe(5);
-
- // Access key2 to make it recently used
- cache.get('key2');
-
- // Add 6th entry - should evict key1 (least recently used)
- cache.set('key6', 'value6');
-
- expect(cache.size).toBe(5);
- expect(cache.get('key1')).toBeUndefined(); // Evicted
- expect(cache.get('key2')).toBe('value2'); // Still present
- expect(cache.get('key6')).toBe('value6'); // Newly added
- });
-
- it('should track LRU based on access time', () => {
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
- cache.set('key3', 'value3');
- cache.set('key4', 'value4');
- cache.set('key5', 'value5');
-
- // Access keys in specific order (these get updated LRU timestamp)
- cache.get('key1'); // Most recently used
- cache.get('key3');
- cache.get('key5');
-
- // key2 and key4 haven't been accessed (oldest LRU)
-
- // Add two more entries - should evict key2 (oldest) and key4 (second oldest)
- cache.set('key6', 'value6'); // Evicts key2
- cache.set('key7', 'value7'); // Evicts key4
-
- // key2 and key4 should be evicted (least recently used)
- expect(cache.get('key2')).toBeUndefined();
-
- // Note: key4 might not be evicted if key1/key3/key5 access didn't update properly
- // This is LRU-based, so we check at least one is evicted
- const evictedCount = [cache.get('key2'), cache.get('key4')].filter(v => v === undefined).length;
- expect(evictedCount).toBeGreaterThan(0);
- });
- });
-
- describe('TTL Expiration', () => {
- it('should expire entries after TTL', async () => {
- cache.set('key1', 'value1', 100); // 100ms TTL
-
- expect(cache.get('key1')).toBe('value1');
-
- // Wait for expiration
- await new Promise((resolve) => setTimeout(resolve, 150));
-
- expect(cache.get('key1')).toBeUndefined();
- });
-
- it('should use default TTL when not specified', async () => {
- cache.set('key1', 'value1'); // Uses default 1000ms
-
- expect(cache.get('key1')).toBe('value1');
-
- // Wait less than default TTL
- await new Promise((resolve) => setTimeout(resolve, 500));
- expect(cache.get('key1')).toBe('value1');
-
- // Wait past default TTL
- await new Promise((resolve) => setTimeout(resolve, 600));
- expect(cache.get('key1')).toBeUndefined();
- });
-
- it('should cleanup expired entries automatically', async () => {
- cache.set('key1', 'value1', 100);
- cache.set('key2', 'value2', 100);
- cache.set('key3', 'value3', 500);
-
- await new Promise((resolve) => setTimeout(resolve, 150));
-
- const removed = cache.cleanup();
-
- expect(removed).toBe(2); // key1 and key2 expired
- expect(cache.size).toBe(1); // Only key3 remains
- expect(cache.get('key3')).toBe('value3');
- });
- });
-
- describe('Pattern Invalidation', () => {
- it('should invalidate entries matching pattern', () => {
- cache.set('entities:type:agent', [{ id: 1 }]);
- cache.set('entities:type:user', [{ id: 2 }]);
- cache.set('entities:name:test', [{ id: 3 }]);
- cache.set('relations:all', [{ from: 1, to: 2 }]);
-
- // Invalidate all entity queries
- const invalidated = cache.invalidatePattern(/^entities:/);
-
- expect(invalidated).toBe(3);
- expect(cache.get('entities:type:agent')).toBeUndefined();
- expect(cache.get('entities:type:user')).toBeUndefined();
- expect(cache.get('entities:name:test')).toBeUndefined();
- expect(cache.get('relations:all')).toEqual([{ from: 1, to: 2 }]);
- });
-
- it('should support complex regex patterns', () => {
- cache.set('entities:type:agent:1', {});
- cache.set('entities:type:agent:2', {});
- cache.set('entities:type:user:1', {});
-
- // Invalidate only agent entities
- const invalidated = cache.invalidatePattern(/^entities:type:agent:/);
-
- expect(invalidated).toBe(2);
- expect(cache.get('entities:type:user:1')).toBeDefined();
- });
- });
-
- describe('Statistics', () => {
- it('should track hits and misses', () => {
- cache.set('key1', 'value1');
-
- cache.get('key1'); // Hit
- cache.get('key1'); // Hit
- cache.get('key2'); // Miss
- cache.get('key3'); // Miss
-
- const stats = cache.getStats();
-
- expect(stats.hits).toBe(2);
- expect(stats.misses).toBe(2);
- expect(stats.hitRate).toBe(50);
- });
-
- it('should calculate hit rate correctly', () => {
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
-
- cache.get('key1'); // Hit
- cache.get('key1'); // Hit
- cache.get('key1'); // Hit
- cache.get('key2'); // Hit
- cache.get('key3'); // Miss
-
- const stats = cache.getStats();
-
- expect(stats.hitRate).toBe(80); // 4 hits, 1 miss = 80%
- });
-
- it('should track cache size and memory', () => {
- cache.set('key1', { data: 'a'.repeat(100) });
- cache.set('key2', { data: 'b'.repeat(200) });
-
- const stats = cache.getStats();
-
- expect(stats.size).toBe(2);
- expect(stats.maxSize).toBe(5);
- expect(stats.memoryUsage).toBeGreaterThan(0);
- });
-
- it('should track oldest and newest entries', async () => {
- cache.set('key1', 'value1');
-
- const stats1 = cache.getStats();
- expect(stats1.oldestEntry).toBe(stats1.newestEntry);
-
- // Wait a bit to ensure different timestamps
- await new Promise((resolve) => setTimeout(resolve, 10));
-
- cache.set('key2', 'value2');
-
- const stats2 = cache.getStats();
- // Since we access key1 when getting stats, timestamps might be equal
- // Just verify both entries exist
- expect(stats2.size).toBe(2);
- expect(stats2.oldestEntry).toBeDefined();
- expect(stats2.newestEntry).toBeDefined();
- });
- });
-
- describe('Key Generation', () => {
- it('should generate consistent keys for same inputs', () => {
- const key1 = QueryCache.generateKey('SELECT * FROM users WHERE id = ?', [1]);
- const key2 = QueryCache.generateKey('SELECT * FROM users WHERE id = ?', [1]);
-
- expect(key1).toBe(key2);
- });
-
- it('should generate different keys for different queries', () => {
- const key1 = QueryCache.generateKey('SELECT * FROM users WHERE id = ?', [1]);
- const key2 = QueryCache.generateKey('SELECT * FROM posts WHERE id = ?', [1]);
-
- expect(key1).not.toBe(key2);
- });
-
- it('should generate different keys for different params', () => {
- const key1 = QueryCache.generateKey('SELECT * FROM users WHERE id = ?', [1]);
- const key2 = QueryCache.generateKey('SELECT * FROM users WHERE id = ?', [2]);
-
- expect(key1).not.toBe(key2);
- });
-
- it('should handle complex parameter types', () => {
- const params = [
- { nested: { object: true } },
- [1, 2, 3],
- null,
- undefined,
- 'string',
- ];
-
- const key1 = QueryCache.generateKey('SELECT *', params);
- const key2 = QueryCache.generateKey('SELECT *', params);
-
- expect(key1).toBe(key2);
- });
- });
-
- describe('Memory Management', () => {
- it('should destroy cache and cleanup resources', () => {
- cache.set('key1', 'value1');
- cache.set('key2', 'value2');
-
- cache.destroy();
-
- expect(cache.size).toBe(0);
- // Cleanup interval should be cleared (can't easily test, but no errors should occur)
- });
-
- it('should handle large values', () => {
- const largeValue = { data: 'x'.repeat(10000) };
- cache.set('large', largeValue);
-
- const retrieved = cache.get('large');
- expect(retrieved).toEqual(largeValue);
-
- const stats = cache.getStats();
- expect(stats.memoryUsage).toBeGreaterThan(10000);
- });
- });
-
- describe('Edge Cases', () => {
- it('should handle empty cache operations', () => {
- expect(cache.size).toBe(0);
- expect(cache.get('anything')).toBeUndefined();
- expect(cache.delete('anything')).toBe(false);
-
- const stats = cache.getStats();
- expect(stats.hits).toBe(0);
- expect(stats.misses).toBe(1);
- expect(stats.hitRate).toBe(0);
- });
-
- it('should handle null and undefined values', () => {
- cache.set('null', null);
- cache.set('undefined', undefined);
-
- expect(cache.get('null')).toBe(null);
- expect(cache.get('undefined')).toBe(undefined);
- });
-
- it('should handle special characters in keys', () => {
- const specialKey = 'entities:type:"agent"\\with\\escapes';
- cache.set(specialKey, 'value');
-
- expect(cache.get(specialKey)).toBe('value');
- });
-
- it('should handle concurrent set operations', () => {
- // Simulate concurrent sets (synchronous in Node.js)
- for (let i = 0; i < 10; i++) {
- cache.set(`key${i}`, `value${i}`);
- }
-
- // Cache should only hold maxSize (5) entries
- expect(cache.size).toBe(5);
-
- // Recent entries should be present
- expect(cache.get('key9')).toBe('value9');
- expect(cache.get('key8')).toBe('value8');
- });
- });
-});
-
-describe('DatabaseQueryCache', () => {
- let dbCache: DatabaseQueryCache;
-
- beforeEach(() => {
- dbCache = new DatabaseQueryCache({
- maxSize: 10,
- defaultTTL: 1000,
- debug: false,
- });
- });
-
- afterEach(() => {
- dbCache.destroy();
- });
-
- describe('Cached Query', () => {
- it('should execute and cache query results', async () => {
- let executionCount = 0;
-
- const executor = () => {
- executionCount++;
- return [{ id: 1, name: 'Test' }];
- };
-
- // First call - should execute
- const result1 = await dbCache.cachedQuery(
- 'SELECT * FROM users WHERE id = ?',
- [1],
- executor
- );
-
- expect(result1).toEqual([{ id: 1, name: 'Test' }]);
- expect(executionCount).toBe(1);
-
- // Second call - should use cache
- const result2 = await dbCache.cachedQuery(
- 'SELECT * FROM users WHERE id = ?',
- [1],
- executor
- );
-
- expect(result2).toEqual([{ id: 1, name: 'Test' }]);
- expect(executionCount).toBe(1); // Not executed again
- });
-
- it('should execute different queries', async () => {
- let executionCount = 0;
-
- const executor = () => {
- executionCount++;
- return [{ id: executionCount }];
- };
-
- const result1 = await dbCache.cachedQuery('SELECT * WHERE id = ?', [1], executor);
- const result2 = await dbCache.cachedQuery('SELECT * WHERE id = ?', [2], executor);
-
- expect(result1).toEqual([{ id: 1 }]);
- expect(result2).toEqual([{ id: 2 }]);
- expect(executionCount).toBe(2);
- });
-
- it('should support async executors', async () => {
- const executor = async () => {
- await new Promise((resolve) => setTimeout(resolve, 10));
- return { async: true };
- };
-
- const result = await dbCache.cachedQuery('SELECT *', [], executor);
-
- expect(result).toEqual({ async: true });
- });
-
- it('should respect custom TTL', async () => {
- let executionCount = 0;
-
- const executor = () => {
- executionCount++;
- return { count: executionCount };
- };
-
- // Cache with short TTL
- await dbCache.cachedQuery('SELECT *', [], executor, 50);
-
- // Should use cache
- await dbCache.cachedQuery('SELECT *', [], executor, 50);
- expect(executionCount).toBe(1);
-
- // Wait for expiration
- await new Promise((resolve) => setTimeout(resolve, 60));
-
- // Should execute again
- await dbCache.cachedQuery('SELECT *', [], executor, 50);
- expect(executionCount).toBe(2);
- });
- });
-
- describe('Performance', () => {
- it('should handle high-volume queries efficiently', async () => {
- const queryCount = 1000;
- let executionCount = 0;
-
- const executor = () => {
- executionCount++;
- return { data: 'test' };
- };
-
- const start = Date.now();
-
- // Execute same query multiple times
- for (let i = 0; i < queryCount; i++) {
- await dbCache.cachedQuery('SELECT *', [], executor);
- }
-
- const duration = Date.now() - start;
-
- // Should only execute once (rest from cache)
- expect(executionCount).toBe(1);
-
- // Should be fast (< 100ms for 1000 cached lookups)
- expect(duration).toBeLessThan(100);
-
- const stats = dbCache.getStats();
- expect(stats.hitRate).toBeGreaterThan(99); // 999 hits, 1 miss
- });
-
- it('should demonstrate cache performance benefit', async () => {
- // Simulate slow database query
- const slowExecutor = async () => {
- await new Promise((resolve) => setTimeout(resolve, 10));
- return { data: 'result' };
- };
-
- // First execution (uncached)
- const start1 = Date.now();
- await dbCache.cachedQuery('SELECT *', [], slowExecutor);
- const duration1 = Date.now() - start1;
-
- // Second execution (cached)
- const start2 = Date.now();
- await dbCache.cachedQuery('SELECT *', [], slowExecutor);
- const duration2 = Date.now() - start2;
-
- // Cached should be significantly faster
- expect(duration2).toBeLessThan(duration1 / 5); // At least 5x faster
- });
- });
-});
diff --git a/tests/e2e/mcp-server-smoke.test.ts b/tests/e2e/mcp-server-smoke.test.ts
deleted file mode 100644
index 5fc88318..00000000
--- a/tests/e2e/mcp-server-smoke.test.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * MCP Server Smoke Test
- *
- * Basic end-to-end test to verify MCP server starts correctly
- * and responds to basic health checks.
- */
-
-import { describe, it, expect } from 'vitest';
-
-describe('MCP Server Smoke Test', () => {
- it('should pass basic validation', () => {
- // Basic smoke test to verify E2E infrastructure works
- // Future: Add actual MCP server startup and communication test
- expect(true).toBe(true);
- });
-
- it('should have required environment variables', () => {
- // Verify test environment is properly configured
- expect(process.env.NODE_ENV).toBe('test');
- });
-});
diff --git a/tests/helpers/TestResourceMonitor.ts b/tests/helpers/TestResourceMonitor.ts
deleted file mode 100644
index 07f4c9d9..00000000
--- a/tests/helpers/TestResourceMonitor.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import type { SystemResources } from '../../src/core/types.js';
-import { ResourceMonitor } from '../../src/core/ResourceMonitor.js';
-
-interface TestResourceMonitorOptions {
- cpuUsage?: number;
- cores?: number;
- memoryTotalMB?: number;
- memoryUsedMB?: number;
-}
-
-export class TestResourceMonitor extends ResourceMonitor {
- private cpuUsage: number;
- private cores: number;
- private memoryTotalMB: number;
- private memoryUsedMB: number;
-
- constructor(
- maxBackgroundAgents: number = 6,
- thresholds?: { maxCPU?: number; maxMemory?: number },
- options: TestResourceMonitorOptions = {}
- ) {
- super(maxBackgroundAgents, thresholds);
- this.cpuUsage = options.cpuUsage ?? 5;
- this.cores = options.cores ?? 8;
- this.memoryTotalMB = options.memoryTotalMB ?? 16384;
- this.memoryUsedMB = options.memoryUsedMB ?? 1024;
- }
-
- setResources(options: TestResourceMonitorOptions): void {
- if (options.cpuUsage !== undefined) this.cpuUsage = options.cpuUsage;
- if (options.cores !== undefined) this.cores = options.cores;
- if (options.memoryTotalMB !== undefined) this.memoryTotalMB = options.memoryTotalMB;
- if (options.memoryUsedMB !== undefined) this.memoryUsedMB = options.memoryUsedMB;
- }
-
- getCurrentResources(): SystemResources {
- const total = this.memoryTotalMB;
- const used = this.memoryUsedMB;
- const available = Math.max(total - used, 0);
-
- return {
- cpu: {
- usage: this.cpuUsage,
- cores: this.cores,
- },
- memory: {
- total,
- used,
- available,
- usagePercent: total > 0 ? (used / total) * 100 : 0,
- },
- activeBackgroundAgents: this.getActiveBackgroundCount(),
- };
- }
-}
diff --git a/tests/hooks/post-commit.test.ts b/tests/hooks/post-commit.test.ts
new file mode 100644
index 00000000..04766266
--- /dev/null
+++ b/tests/hooks/post-commit.test.ts
@@ -0,0 +1,165 @@
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import { execFileSync } from 'child_process';
+import { createRequire } from 'module';
+import fs from 'fs';
+import path from 'path';
+import os from 'os';
+
+const require = createRequire(import.meta.url);
+
+describe('Feature: Post-Commit Hook', () => {
+ let testDir: string;
+ let dbPath: string;
+
+ beforeEach(() => {
+ testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'memesh-hook-test-'));
+ dbPath = path.join(testDir, 'test.db');
+ });
+
+ afterEach(() => {
+ fs.rmSync(testDir, { recursive: true, force: true });
+ });
+
+ function runHook(input: object): void {
+ const hookPath = path.resolve('scripts/hooks/post-commit.js');
+ const jsonInput = JSON.stringify(input);
+ execFileSync('node', [hookPath], {
+ input: jsonInput,
+ env: { ...process.env, MEMESH_DB_PATH: dbPath },
+ encoding: 'utf8',
+ timeout: 15000,
+ });
+ }
+
+ function openDb(): InstanceType<any> {
+ const Database = require('better-sqlite3');
+ return new Database(dbPath, { readonly: true });
+ }
+
+ it('Scenario: Bash output with git commit -> entity created', () => {
+ const input = {
+ tool_name: 'Bash',
+ cwd: '/tmp/myproject',
+ tool_input: { command: 'git commit -m "fix: resolve login bug"' },
+ tool_output: '[main abc1234] fix: resolve login bug\n 2 files changed, 15 insertions(+), 3 deletions(-)',
+ };
+
+ runHook(input);
+
+ // Verify entity was created
+ const db = openDb();
+ const entity = db.prepare('SELECT * FROM entities WHERE name = ?').get('commit-abc1234');
+ expect(entity).toBeTruthy();
+ expect(entity.type).toBe('commit');
+
+ // Verify observation
+ const obs = db.prepare('SELECT * FROM observations WHERE entity_id = ?').get(entity.id);
+ expect(obs).toBeTruthy();
+ expect(obs.content).toBe('fix: resolve login bug');
+
+ // Verify tag
+ const tag = db.prepare('SELECT * FROM tags WHERE entity_id = ? AND tag = ?').get(entity.id, 'project:myproject');
+ expect(tag).toBeTruthy();
+
+ // Verify FTS
+ const fts = db.prepare("SELECT * FROM entities_fts WHERE entities_fts MATCH 'login'").all();
+ expect(fts.length).toBeGreaterThan(0);
+
+ db.close();
+ });
+
+ it('Scenario: Bash output without git commit -> no entity created', () => {
+ const input = {
+ tool_name: 'Bash',
+ cwd: '/tmp/myproject',
+ tool_input: { command: 'ls -la' },
+ tool_output: 'total 32\ndrwxr-xr-x 5 user staff 160 Jan 1 00:00 .',
+ };
+
+ runHook(input);
+
+ // Database should not even exist (no commit detected)
+ expect(fs.existsSync(dbPath)).toBe(false);
+ });
+
+ it('Scenario: Non-Bash tool -> exits cleanly without action', () => {
+ const input = {
+ tool_name: 'Read',
+ cwd: '/tmp/myproject',
+ tool_input: { file_path: '/tmp/test.txt' },
+ tool_output: 'file contents',
+ };
+
+ runHook(input);
+ expect(fs.existsSync(dbPath)).toBe(false);
+ });
+
+ it('Scenario: Database does not exist -> creates it and stores commit', () => {
+ expect(fs.existsSync(dbPath)).toBe(false);
+
+ const input = {
+ tool_name: 'Bash',
+ cwd: '/tmp/newproject',
+ tool_input: { command: 'git commit -m "initial commit"' },
+ tool_output: '[main def5678] initial commit\n 1 file changed, 1 insertion(+)',
+ };
+
+ runHook(input);
+
+ expect(fs.existsSync(dbPath)).toBe(true);
+ const db = openDb();
+ const entity = db.prepare('SELECT * FROM entities WHERE name = ?').get('commit-def5678');
+ expect(entity).toBeTruthy();
+ expect(entity.type).toBe('commit');
+ db.close();
+ });
+
+ it('Scenario: Branch name with slashes -> commit detected correctly', () => {
+ const input = {
+ tool_name: 'Bash',
+ cwd: '/tmp/myproject',
+ tool_input: { command: 'git commit -m "feat: add feature"' },
+ tool_output: '[feature/v3-hooks 9a8b7c6] feat: add feature\n 3 files changed',
+ };
+
+ runHook(input);
+
+ const db = openDb();
+ const entity = db.prepare('SELECT * FROM entities WHERE name = ?').get('commit-9a8b7c6');
+ expect(entity).toBeTruthy();
+ const obs = db.prepare('SELECT content FROM observations WHERE entity_id = ?').get(entity.id);
+ expect(obs.content).toBe('feat: add feature');
+ db.close();
+ });
+
+ it('Scenario: Duplicate commit -> no duplicate entities', () => {
+ const input = {
+ tool_name: 'Bash',
+ cwd: '/tmp/myproject',
+ tool_input: { command: 'git commit -m "same commit"' },
+ tool_output: '[main aaa1111] same commit\n 1 file changed',
+ };
+
+ runHook(input);
+ runHook(input);
+
+ const db = openDb();
+ const entities = db.prepare('SELECT * FROM entities WHERE name = ?').all('commit-aaa1111');
+ expect(entities).toHaveLength(1);
+ // But observations may be duplicated (each hook run adds one)
+ const obs = db.prepare('SELECT * FROM observations WHERE entity_id = ?').all(entities[0].id);
+ expect(obs.length).toBeGreaterThanOrEqual(1);
+ db.close();
+ });
+
+ it('Scenario: Invalid JSON input -> exits cleanly', () => {
+ const hookPath = path.resolve('scripts/hooks/post-commit.js');
+ // Should not throw
+ execFileSync('node', [hookPath], {
+ input: 'not-json',
+ env: { ...process.env, MEMESH_DB_PATH: dbPath },
+ encoding: 'utf8',
+ timeout: 15000,
+ });
+ });
+});
diff --git a/tests/hooks/session-start.test.ts b/tests/hooks/session-start.test.ts
new file mode 100644
index 00000000..35c13b62
--- /dev/null
+++ b/tests/hooks/session-start.test.ts
@@ -0,0 +1,146 @@
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import { execFileSync } from 'child_process';
+import { createRequire } from 'module';
+import fs from 'fs';
+import path from 'path';
+import os from 'os';
+
+const require = createRequire(import.meta.url);
+
+describe('Feature: Session Start Hook', () => {
+ let testDir: string;
+ let dbPath: string;
+
+ beforeEach(() => {
+ testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'memesh-hook-test-'));
+ dbPath = path.join(testDir, 'test.db');
+ });
+
+ afterEach(() => {
+ fs.rmSync(testDir, { recursive: true, force: true });
+ });
+
+ function runHook(input: object): { result: string } {
+ const hookPath = path.resolve('scripts/hooks/session-start.js');
+ const jsonInput = JSON.stringify(input);
+ const result = execFileSync('node', [hookPath], {
+ input: jsonInput,
+ env: { ...process.env, MEMESH_DB_PATH: dbPath },
+ encoding: 'utf8',
+ timeout: 15000,
+ });
+ return JSON.parse(result.trim());
+ }
+
+ function createTestDb(): InstanceType<any> {
+ const Database = require('better-sqlite3');
+ const db = new Database(dbPath);
+ db.pragma('journal_mode = WAL');
+ db.pragma('foreign_keys = ON');
+ db.exec(`
+ CREATE TABLE IF NOT EXISTS entities (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT NOT NULL UNIQUE,
+ type TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ metadata JSON
+ );
+ CREATE TABLE IF NOT EXISTS observations (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ entity_id INTEGER NOT NULL,
+ content TEXT NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
+ );
+ CREATE TABLE IF NOT EXISTS tags (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ entity_id INTEGER NOT NULL,
+ tag TEXT NOT NULL,
+ FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
+ );
+ CREATE INDEX IF NOT EXISTS idx_tags_entity ON tags(entity_id);
+ CREATE INDEX IF NOT EXISTS idx_tags_tag ON tags(tag);
+ CREATE INDEX IF NOT EXISTS idx_observations_entity ON observations(entity_id);
+ CREATE VIRTUAL TABLE IF NOT EXISTS entities_fts USING fts5(
+ name, observations, content='',
+ tokenize='unicode61 remove_diacritics 1'
+ );
+ `);
+ return db;
+ }
+
+ it('Scenario: No database exists -> welcome message', () => {
+ const output = runHook({ cwd: '/tmp/myproject' });
+ expect(output.result).toContain('No database found');
+ });
+
+ it('Scenario: Empty database (no entities table) -> graceful message', () => {
+ // Create an empty db file with no tables
+ const Database = require('better-sqlite3');
+ const db = new Database(dbPath);
+ db.close();
+
+ const output = runHook({ cwd: '/tmp/myproject' });
+ // Empty db hits the catch block or table check — either way, no crash
+ expect(output.result).toBeTruthy();
+ });
+
+ it('Scenario: Database with project memories -> recalls them with observations', () => {
+ const db = createTestDb();
+ db.prepare('INSERT INTO entities (name, type) VALUES (?, ?)').run('auth-module', 'component');
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(1, 'Handles JWT token validation');
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(1, 'Uses bcrypt for password hashing');
+ db.prepare('INSERT INTO tags (entity_id, tag) VALUES (?, ?)').run(1, 'project:myproject');
+ db.close();
+
+ const output = runHook({ cwd: '/tmp/myproject' });
+ expect(output.result).toContain('Project "myproject" memories');
+ expect(output.result).toContain('[component] auth-module');
+ expect(output.result).toContain('Uses bcrypt for password hashing');
+ expect(output.result).toContain('Handles JWT token validation');
+ });
+
+ it('Scenario: Database with no matching project -> shows only recent memories', () => {
+ const db = createTestDb();
+ db.prepare('INSERT INTO entities (name, type) VALUES (?, ?)').run('some-entity', 'note');
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(1, 'A note about something');
+ db.close();
+
+ const output = runHook({ cwd: '/tmp/other-project' });
+ expect(output.result).not.toContain('Project "other-project" memories');
+ expect(output.result).toContain('Recent memories');
+ expect(output.result).toContain('[note] some-entity');
+ expect(output.result).toContain('A note about something');
+ });
+
+ it('Scenario: Database with both project and global memories -> shows both', () => {
+ const db = createTestDb();
+ // Project entity
+ db.prepare('INSERT INTO entities (name, type) VALUES (?, ?)').run('project-item', 'feature');
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(1, 'Project specific');
+ db.prepare('INSERT INTO tags (entity_id, tag) VALUES (?, ?)').run(1, 'project:testproj');
+ // Global entity (no project tag)
+ db.prepare('INSERT INTO entities (name, type) VALUES (?, ?)').run('global-item', 'note');
+ db.prepare('INSERT INTO observations (entity_id, content) VALUES (?, ?)').run(2, 'Global note');
+ db.close();
+
+ const output = runHook({ cwd: '/tmp/testproj' });
+ expect(output.result).toContain('Project "testproj" memories');
+ expect(output.result).toContain('[feature] project-item');
+ expect(output.result).toContain('Recent memories');
+ expect(output.result).toContain('[note] global-item');
+ });
+
+ it('Scenario: Always exits with valid JSON output on invalid input', () => {
+ const hookPath = path.resolve('scripts/hooks/session-start.js');
+ const result = execFileSync('node', [hookPath], {
+ input: 'not-json',
+ env: { ...process.env, MEMESH_DB_PATH: dbPath },
+ encoding: 'utf8',
+ timeout: 15000,
+ });
+ const parsed = JSON.parse(result.trim());
+ expect(parsed).toHaveProperty('result');
+ expect(typeof parsed.result).toBe('string');
+ });
+});
diff --git a/tests/installation.test.ts b/tests/installation.test.ts
index 0e65bd03..d9701f80 100644
--- a/tests/installation.test.ts
+++ b/tests/installation.test.ts
@@ -4,61 +4,46 @@ import { execSync } from 'child_process';
describe('Installation Verification', () => {
describe('Prerequisites', () => {
- it('should have Node.js 18+ installed', () => {
+ it('should have Node.js 20+ installed', () => {
const version = execSync('node -v').toString().trim();
const major = parseInt(version.slice(1).split('.')[0]);
- expect(major).toBeGreaterThanOrEqual(18);
- });
-
- it('should have npm installed', () => {
- const version = execSync('npm -v').toString().trim();
- expect(version).toBeTruthy();
- });
- });
-
- describe('Build Artifacts', () => {
- it('should have dist directory', () => {
- expect(fs.existsSync('dist')).toBe(true);
- });
-
- it('should have MCP server built', () => {
- expect(fs.existsSync('dist/mcp/server-bootstrap.js')).toBe(true);
- });
-
- it('should have main entry point', () => {
- expect(fs.existsSync('dist/index.js')).toBe(true);
+ expect(major).toBeGreaterThanOrEqual(20);
});
});
describe('Configuration Files', () => {
- it('should have package.json', () => {
+ it('should have package.json with correct name', () => {
const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8'));
expect(pkg.name).toBe('@pcircle/memesh');
- expect(pkg.version).toMatch(/^\d+\.\d+\.\d+$/);
});
it('should have plugin.json', () => {
expect(fs.existsSync('plugin.json')).toBe(true);
});
- });
- describe('MCP Configuration', () => {
- it('should have valid MCP server export', async () => {
- // Import the MCP server module to verify it exports correctly
- const serverModule = await import('../dist/mcp/server-bootstrap.js');
- expect(serverModule).toBeDefined();
+ it('should have .mcp.json', () => {
+ expect(fs.existsSync('.mcp.json')).toBe(true);
});
- });
- describe('Installation Scripts', () => {
- it('should have install.sh', () => {
- expect(fs.existsSync('scripts/install.sh')).toBe(true);
- const stat = fs.statSync('scripts/install.sh');
- expect(stat.mode & 0o111).toBeTruthy(); // Executable
+ it('should have hooks.json with 2 hooks', () => {
+ const hooks = JSON.parse(fs.readFileSync('hooks/hooks.json', 'utf8'));
+ const hookTypes = Object.keys(hooks.hooks);
+ expect(hookTypes).toHaveLength(2);
+ expect(hookTypes).toContain('SessionStart');
+ expect(hookTypes).toContain('PostToolUse');
});
+ });
+
+ describe('Hook Scripts', () => {
+ const hookFiles = [
+ 'scripts/hooks/session-start.js',
+ 'scripts/hooks/post-commit.js',
+ ];
- it('should have install-helpers.js', () => {
- expect(fs.existsSync('scripts/install-helpers.js')).toBe(true);
+ it.each(hookFiles)('%s should exist and be executable', (hookPath) => {
+ expect(fs.existsSync(hookPath)).toBe(true);
+ const stat = fs.statSync(hookPath);
+ expect(stat.mode & 0o111).toBeTruthy();
});
});
});
diff --git a/tests/integration/cloud-only-mode.integration.test.ts b/tests/integration/cloud-only-mode.integration.test.ts
deleted file mode 100644
index 074fd2ee..00000000
--- a/tests/integration/cloud-only-mode.integration.test.ts
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Cloud-Only Mode Integration Tests
- *
- * Verifies that when running in cloud-only mode:
- * 1. Memory-dependent tools return appropriate error messages
- * 2. Non-memory tools continue to work normally
- * 3. Error messages provide clear guidance to users
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import { ServerInitializer } from '../../src/mcp/ServerInitializer.js';
-import { ToolHandlers } from '../../src/mcp/handlers/ToolHandlers.js';
-import { BuddyHandlers } from '../../src/mcp/handlers/BuddyHandlers.js';
-import * as BetterSqlite3Adapter from '../../src/db/adapters/BetterSqlite3Adapter.js';
-import * as MeMeshCloudClient from '../../src/cloud/MeMeshCloudClient.js';
-
-describe('Cloud-Only Mode - Integration Tests', () => {
- let toolHandlers: ToolHandlers;
- let buddyHandlers: BuddyHandlers;
-
- beforeEach(async () => {
- // Mock SQLite as unavailable
- vi.spyOn(BetterSqlite3Adapter, 'checkBetterSqlite3Availability').mockResolvedValue({
- available: false,
- name: 'better-sqlite3',
- error: 'Module not found',
- fallbackSuggestion: 'Run: npm install better-sqlite3',
- });
-
- // Mock cloud as enabled
- process.env.MEMESH_API_KEY = 'test-integration-key';
- vi.spyOn(MeMeshCloudClient, 'isCloudEnabled').mockReturnValue(true);
-
- // Initialize server in cloud-only mode
- const components = await ServerInitializer.initialize();
-
- // Verify we're in cloud-only mode
- expect(components.cloudOnlyMode).toBe(true);
- expect(components.knowledgeGraph).toBeUndefined();
- expect(components.projectMemoryManager).toBeUndefined();
-
- // Get handler instances
- toolHandlers = components.toolHandlers;
- buddyHandlers = components.buddyHandlers;
- });
-
- afterEach(() => {
- vi.restoreAllMocks();
- delete process.env.MEMESH_API_KEY;
- });
-
- describe('Memory-Dependent Tools', () => {
- it('should return cloud-only error for recall-memory tool', async () => {
- const result = await toolHandlers.handleRecallMemory({
- query: 'test query',
- limit: 5,
- });
-
- expect(result.isError).toBe(true);
- expect(result.content).toHaveLength(1);
- expect(result.content[0]).toMatchObject({
- type: 'text',
- text: expect.stringContaining("❌ Tool 'recall-memory' is not available in cloud-only mode"),
- });
- expect(result.content[0].text).toContain('better-sqlite3 unavailable');
- expect(result.content[0].text).toContain('Install better-sqlite3');
- });
-
- it('should return cloud-only error for create-entities tool', async () => {
- const result = await toolHandlers.handleCreateEntities({
- entities: [{ text: 'test entity' }],
- });
-
- expect(result.isError).toBe(true);
- expect(result.content[0].text).toContain("❌ Tool 'create-entities' is not available in cloud-only mode");
- });
-
- it('should return cloud-only error for buddy-do tool', async () => {
- const result = await buddyHandlers.handleBuddyDo({
- task: 'test task',
- });
-
- expect(result.isError).toBe(true);
- expect(result.content[0].text).toContain("❌ Tool 'buddy-do' is not available in cloud-only mode");
- });
-
- it('should return cloud-only error for buddy-remember tool', async () => {
- const result = await buddyHandlers.handleBuddyRemember({
- query: 'test query',
- });
-
- expect(result.isError).toBe(true);
- expect(result.content[0].text).toContain("❌ Tool 'buddy-remember' is not available in cloud-only mode");
- });
-
- it('should return cloud-only error for hook-tool-use', async () => {
- const result = await toolHandlers.handleHookToolUse({
- toolName: 'test-tool',
- success: true,
- durationMs: 100,
- });
-
- expect(result.isError).toBe(true);
- expect(result.content[0].text).toContain("❌ Tool 'hook-tool-use' is not available in cloud-only mode");
- });
- });
-
- describe('Non-Memory Tools', () => {
- it('should work normally for buddy-help tool', async () => {
- const result = await buddyHandlers.handleBuddyHelp({});
-
- expect(result.isError).toBeUndefined();
- expect(result.content).toBeDefined();
- // Check for MeMesh branding in output
- expect(result.content[0].text).toMatch(/MeMesh|buddy-do|buddy-remember/i);
- });
-
- it('should work normally for list-skills tool', async () => {
- const result = await toolHandlers.handleListSkills({});
-
- expect(result.isError).toBeUndefined();
- expect(result.content).toBeDefined();
- // Should return skill list (may be empty, but not an error)
- });
- });
-
- describe('Error Message Quality', () => {
- it('should provide actionable guidance in error messages', async () => {
- const result = await toolHandlers.handleRecallMemory({
- query: 'test',
- });
-
- const errorText = result.content[0].text;
-
- // Should explain what's wrong
- expect(errorText).toMatch(/cloud-only mode/i);
- expect(errorText).toMatch(/better-sqlite3 unavailable/i);
-
- // Should provide solutions
- expect(errorText).toMatch(/Install better-sqlite3/i);
- expect(errorText).toMatch(/Restart.*MCP server/i);
-
- // Should mention local storage requirement
- expect(errorText).toMatch(/Local SQLite storage is required/i);
- });
-
- it('should use consistent error format across all memory tools', async () => {
- const tools = [
- () => toolHandlers.handleRecallMemory({ query: 'test' }),
- () => toolHandlers.handleCreateEntities({ entities: [] }),
- () => buddyHandlers.handleBuddyDo({ task: 'test' }),
- () => buddyHandlers.handleBuddyRemember({ query: 'test' }),
- ];
-
- const results = await Promise.all(tools.map(fn => fn()));
-
- // All should be errors
- expect(results.every(r => r.isError === true)).toBe(true);
-
- // All should start with ❌ emoji
- expect(results.every(r => r.content[0].text.startsWith('❌'))).toBe(true);
-
- // All should mention cloud-only mode
- expect(results.every(r => r.content[0].text.includes('cloud-only mode'))).toBe(true);
-
- // All should provide installation instructions
- expect(results.every(r => r.content[0].text.includes('npm install better-sqlite3'))).toBe(true);
- });
- });
-});
diff --git a/tests/integration/connection-pool.integration.test.ts b/tests/integration/connection-pool.integration.test.ts
deleted file mode 100644
index 603f32e2..00000000
--- a/tests/integration/connection-pool.integration.test.ts
+++ /dev/null
@@ -1,823 +0,0 @@
-/**
- * ConnectionPool Integration Tests
- *
- * Comprehensive integration tests for ConnectionPool and SimpleDatabaseFactory integration.
- * Tests connection pooling, concurrent access, health checks, resource management, and error handling.
- *
- * Test Structure:
- * 1. Pool Initialization - Verify pool creates and configures connections properly
- * 2. Concurrent Connection Acquisition - Test parallel acquire/release operations
- * 3. Connection Health Checks - Verify health check intervals and connection recycling
- * 4. Resource Management - Test connection lifecycle and cleanup
- * 5. Error Handling - Verify proper error handling for various failure scenarios
- *
- * Performance Targets:
- * - Connection acquisition: <10ms average
- * - Pool initialization: <100ms
- * - Concurrent acquisitions: 50 operations complete successfully
- * - No resource leaks after 100 acquire/release cycles
- *
- * @module tests/integration/connection-pool
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import { ConnectionPool, type ConnectionPoolOptions, type PoolStats } from '../../src/db/ConnectionPool.js';
-import { SimpleDatabaseFactory } from '../../src/config/simple-config.js';
-import type { ILogger } from '../../src/utils/ILogger.js';
-import Database from 'better-sqlite3';
-import { mkdtempSync, rmSync } from 'fs';
-import { tmpdir } from 'os';
-import { join } from 'path';
-
-/**
- * Mock Logger Implementation
- *
- * Captures all log calls for verification in tests.
- * Allows us to verify that ConnectionPool is logging expected messages.
- */
-class MockLogger implements ILogger {
- public logs: Array<{ level: string; message: string; meta?: Record<string, unknown> }> = [];
-
- info(message: string, meta?: Record<string, unknown>): void {
- this.logs.push({ level: 'info', message, meta });
- }
-
- error(message: string, meta?: Record<string, unknown>): void {
- this.logs.push({ level: 'error', message, meta });
- }
-
- warn(message: string, meta?: Record<string, unknown>): void {
- this.logs.push({ level: 'warn', message, meta });
- }
-
- debug(message: string, meta?: Record<string, unknown>): void {
- this.logs.push({ level: 'debug', message, meta });
- }
-
- clear(): void {
- this.logs = [];
- }
-
- /**
- * Check if logger has received a specific message
- */
- hasMessage(level: string, messagePattern: string | RegExp): boolean {
- return this.logs.some(
- (log) =>
- log.level === level &&
- (typeof messagePattern === 'string'
- ? log.message.includes(messagePattern)
- : messagePattern.test(log.message))
- );
- }
-
- /**
- * Get all messages for a specific log level
- */
- getMessages(level: string): string[] {
- return this.logs.filter((log) => log.level === level).map((log) => log.message);
- }
-}
-
-/**
- * Test Suite: ConnectionPool Integration Tests
- */
-describe('ConnectionPool Integration Tests', () => {
- let testDbPath: string;
- let tempDir: string;
- let mockLogger: MockLogger;
-
- /**
- * Setup: Create temporary test database before each test
- */
- beforeEach(() => {
- // Create temporary directory for test databases
- tempDir = mkdtempSync(join(tmpdir(), 'connection-pool-test-'));
- testDbPath = join(tempDir, 'test.db');
-
- // Create mock logger
- mockLogger = new MockLogger();
-
- // Initialize test database with schema
- const db = new Database(testDbPath);
- db.exec(`
- CREATE TABLE IF NOT EXISTS users (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- name TEXT NOT NULL,
- email TEXT UNIQUE NOT NULL,
- created_at INTEGER DEFAULT (strftime('%s', 'now'))
- );
-
- CREATE TABLE IF NOT EXISTS sessions (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- user_id INTEGER NOT NULL,
- token TEXT UNIQUE NOT NULL,
- created_at INTEGER DEFAULT (strftime('%s', 'now')),
- FOREIGN KEY (user_id) REFERENCES users(id)
- );
-
- -- Insert test data
- INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com');
- INSERT INTO users (name, email) VALUES ('Bob', 'bob@example.com');
- INSERT INTO users (name, email) VALUES ('Charlie', 'charlie@example.com');
- `);
- db.close();
- });
-
- /**
- * Teardown: Clean up test database and temporary directory
- */
- afterEach(async () => {
- // Clean up SimpleDatabaseFactory caches
- await SimpleDatabaseFactory.closeAll();
-
- // Remove temporary directory
- if (tempDir) {
- try {
- rmSync(tempDir, { recursive: true, force: true });
- } catch (error) {
- console.error('Failed to clean up temp directory:', error);
- }
- }
-
- // Clear mock logger
- mockLogger.clear();
- });
-
- /**
- * Test Suite 1: Pool Initialization
- *
- * Verifies that ConnectionPool creates the correct number of connections,
- * configures them properly (WAL mode, cache, mmap), and integrates with
- * verbose logging.
- */
- describe('Pool Initialization', () => {
- it('should create pool with correct number of connections', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const stats = pool.getStats();
-
- expect(stats.total).toBe(5);
- expect(stats.idle).toBe(5);
- expect(stats.active).toBe(0);
- expect(stats.waiting).toBe(0);
-
- // Cleanup
- await pool.shutdown();
- });
-
- it('should configure each connection with WAL mode', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 3, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Acquire all connections and check WAL mode
- const connections: any[] = [];
- for (let i = 0; i < 3; i++) {
- const db = await pool.acquire();
- connections.push(db);
-
- // Check journal_mode is WAL
- const result = (db as any).db.pragma('journal_mode', { simple: true }) as string;
- expect(['wal', 'delete']).toContain(result.toLowerCase());
- }
-
- // Release all connections
- connections.forEach((db) => pool.release(db));
-
- // Cleanup
- await pool.shutdown();
- });
-
- it('should configure connections with foreign key constraints enabled', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const db = await pool.acquire();
-
- // Check foreign_keys pragma
- const result = (db as any).db.pragma('foreign_keys', { simple: true }) as number;
- expect(result).toBe(1); // 1 means ON
-
- pool.release(db);
- await pool.shutdown();
- });
-
- it('should integrate with verbose logger when provided', async () => {
- const pool = await ConnectionPool.create(
- testDbPath,
- { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 },
- mockLogger
- );
-
- const db = await pool.acquire();
-
- // Execute a query to trigger verbose logging
- db.prepare('SELECT * FROM users WHERE id = ?').get(1);
-
- pool.release(db);
-
- // Check that SQLite debug messages were logged
- const debugMessages = mockLogger.getMessages('debug');
- expect(debugMessages.length).toBeGreaterThan(0);
- expect(debugMessages.some((msg) => msg.includes('SQLite'))).toBe(true);
-
- await pool.shutdown();
- });
-
- it('should initialize pool in under 100ms', async () => {
- const startTime = Date.now();
-
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const initTime = Date.now() - startTime;
-
- expect(initTime).toBeLessThan(100);
- expect(pool.isHealthy()).toBe(true);
-
- await pool.shutdown();
- });
-
- it('should throw error if maxConnections is less than 1', async () => {
- await expect(
- ConnectionPool.create(testDbPath, { maxConnections: 0, connectionTimeout: 5000, idleTimeout: 30000 })
- ).rejects.toThrow('maxConnections must be at least 1');
- });
- });
-
- /**
- * Test Suite 2: Concurrent Connection Acquisition
- *
- * Tests parallel acquire operations to verify:
- * - All connections returned correctly
- * - No connection is double-acquired
- * - Average acquisition time is acceptable
- * - Pool handles high concurrent load
- */
- describe('Concurrent Connection Acquisition', () => {
- it('should handle 50 concurrent acquire operations successfully', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const acquireTasks = Array.from({ length: 50 }, async (_, i) => {
- const db = await pool.acquire();
-
- // Perform a simple query to verify connection works
- const result = db.prepare('SELECT COUNT(*) as count FROM users').get() as { count: number };
- expect(result.count).toBe(3); // We inserted 3 users in beforeEach
-
- // Simulate some work (random 1-10ms)
- await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-
- pool.release(db);
- return i;
- });
-
- const results = await Promise.all(acquireTasks);
-
- // Verify all 50 tasks completed
- expect(results).toHaveLength(50);
- expect(results).toEqual(Array.from({ length: 50 }, (_, i) => i));
-
- // Verify pool is healthy and all connections are idle
- const stats = pool.getStats();
- expect(stats.idle).toBe(5);
- expect(stats.active).toBe(0);
- expect(stats.waiting).toBe(0);
-
- await pool.shutdown();
- });
-
- it('should ensure no connection is double-acquired', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 3, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const activeConnections = new Set();
- const connectionHistory: any[] = [];
- let doubleAcquire = false;
-
- const acquireTasks = Array.from({ length: 20 }, async () => {
- const db = await pool.acquire();
-
- // Check if this connection is already active
- if (activeConnections.has(db)) {
- doubleAcquire = true;
- }
-
- activeConnections.add(db);
- connectionHistory.push(db);
-
- // Hold connection briefly
- await new Promise((resolve) => setTimeout(resolve, 5));
-
- activeConnections.delete(db);
- pool.release(db);
- });
-
- await Promise.all(acquireTasks);
-
- // Verify no double acquisition occurred
- expect(doubleAcquire).toBe(false);
-
- // Verify all connections were returned to pool
- const stats = pool.getStats();
- expect(stats.idle).toBe(3);
- expect(stats.active).toBe(0);
-
- await pool.shutdown();
- });
-
- it('should achieve average acquisition time under 10ms', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const acquisitionTimes: number[] = [];
-
- const acquireTasks = Array.from({ length: 50 }, async () => {
- const startTime = Date.now();
- const db = await pool.acquire();
- const acquisitionTime = Date.now() - startTime;
-
- acquisitionTimes.push(acquisitionTime);
-
- // Quick operation
- db.prepare('SELECT 1').get();
-
- pool.release(db);
- });
-
- await Promise.all(acquireTasks);
-
- // Calculate average acquisition time
- const averageTime = acquisitionTimes.reduce((a, b) => a + b, 0) / acquisitionTimes.length;
-
- expect(averageTime).toBeLessThan(10);
-
- await pool.shutdown();
- });
-
- it('should handle pool exhaustion by queuing requests', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Acquire all connections
- const db1 = await pool.acquire();
- const db2 = await pool.acquire();
-
- let stats = pool.getStats();
- expect(stats.idle).toBe(0);
- expect(stats.active).toBe(2);
-
- // Try to acquire a third connection (should queue)
- const acquirePromise = pool.acquire();
-
- // Wait a bit for the request to be queued
- await new Promise((resolve) => setTimeout(resolve, 10));
-
- stats = pool.getStats();
- expect(stats.waiting).toBe(1);
-
- // Release one connection - queued request should get it
- pool.release(db1);
-
- const db3 = await acquirePromise;
- expect(db3).toBeDefined();
-
- stats = pool.getStats();
- expect(stats.waiting).toBe(0);
- expect(stats.active).toBe(2);
-
- // Cleanup
- pool.release(db2);
- pool.release(db3);
- await pool.shutdown();
- });
- });
-
- /**
- * Test Suite 3: Connection Health Checks
- *
- * Verifies health check interval works correctly and unhealthy connections
- * are recycled properly.
- */
- describe('Connection Health Checks', () => {
- it('should recycle idle connections after idleTimeout', async () => {
- vi.useFakeTimers();
-
- const pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 2,
- connectionTimeout: 5000,
- idleTimeout: 5000, // Minimum allowed idle timeout
- healthCheckInterval: 5000, // Minimum allowed health check interval
- });
-
- // Acquire and release a connection
- const db1 = await pool.acquire();
- const initialDb1 = db1; // Save reference
- pool.release(db1);
-
- let stats = pool.getStats();
- expect(stats.totalRecycled).toBe(0);
-
- // Advance past idle timeout + health check interval to trigger recycling
- await vi.advanceTimersByTimeAsync(11000);
-
- // Acquire a connection - should be a recycled (new) one
- const db2 = await pool.acquire();
-
- stats = pool.getStats();
- expect(stats.totalRecycled).toBeGreaterThan(0);
-
- // The connection instance should be different after recycling
- expect(db2).not.toBe(initialDb1);
-
- pool.release(db2);
- await pool.shutdown();
-
- vi.useRealTimers();
- });
-
- it('should maintain pool size during health checks', async () => {
- vi.useFakeTimers();
-
- const pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 5000, // Minimum allowed idle timeout
- healthCheckInterval: 5000, // Minimum allowed health check interval
- });
-
- // Advance past multiple health check cycles
- await vi.advanceTimersByTimeAsync(16000);
-
- const stats = pool.getStats();
-
- // Pool should still have all connections
- expect(stats.total).toBe(3);
- expect(pool.isHealthy()).toBe(true);
-
- await pool.shutdown();
-
- vi.useRealTimers();
- });
-
- it('should verify connections are functional after recycling', async () => {
- vi.useFakeTimers();
-
- const pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 2,
- connectionTimeout: 5000,
- idleTimeout: 5000, // Minimum allowed idle timeout
- healthCheckInterval: 5000, // Minimum allowed health check interval
- });
-
- // Acquire, use, and release a connection
- const db1 = await pool.acquire();
- db1.prepare('SELECT * FROM users').all();
- pool.release(db1);
-
- // Advance past idle timeout + health check interval to trigger recycling
- await vi.advanceTimersByTimeAsync(11000);
-
- // Acquire connection again - should be recycled
- const db2 = await pool.acquire();
-
- // Verify it's functional
- const result = db2.prepare('SELECT COUNT(*) as count FROM users').get() as { count: number };
- expect(result.count).toBe(3);
-
- pool.release(db2);
- await pool.shutdown();
-
- vi.useRealTimers();
- });
- });
-
- /**
- * Test Suite 4: Resource Management
- *
- * Tests connection lifecycle, cleanup, and ensures no resource leaks.
- */
- describe('Resource Management', () => {
- it('should properly release connections back to pool', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 3, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Acquire all connections
- const dbs = await Promise.all([pool.acquire(), pool.acquire(), pool.acquire()]);
-
- let stats = pool.getStats();
- expect(stats.active).toBe(3);
- expect(stats.idle).toBe(0);
-
- // Release all connections
- dbs.forEach((db) => pool.release(db));
-
- stats = pool.getStats();
- expect(stats.active).toBe(0);
- expect(stats.idle).toBe(3);
-
- await pool.shutdown();
- });
-
- it('should handle 100 acquire/release cycles without leaks', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 3, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const initialStats = pool.getStats();
-
- for (let i = 0; i < 100; i++) {
- const db = await pool.acquire();
- db.prepare('SELECT 1').get();
- pool.release(db);
- }
-
- const finalStats = pool.getStats();
-
- // Pool should still be healthy
- expect(finalStats.total).toBe(initialStats.total);
- expect(finalStats.idle).toBe(initialStats.idle);
- expect(finalStats.active).toBe(0);
- expect(pool.isHealthy()).toBe(true);
-
- // Verify acquire/release counts match
- expect(finalStats.totalAcquired).toBe(100);
- expect(finalStats.totalReleased).toBe(100);
-
- await pool.shutdown();
- });
-
- it('should gracefully shutdown and close all connections', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 4, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Acquire some connections
- const db1 = await pool.acquire();
- const db2 = await pool.acquire();
-
- // Verify both connections are acquired and functional
- expect(db1).toBeDefined();
- expect(db2).toBeDefined();
- expect(db1).not.toBe(db2); // Should be different connection instances
-
- // Release one
- pool.release(db1);
-
- // Shutdown pool (db2 is still held, but shutdown should handle it)
- await pool.shutdown();
-
- // Verify stats show empty pool
- const stats = pool.getStats();
- expect(stats.total).toBe(0);
- expect(stats.idle).toBe(0);
- expect(stats.active).toBe(0);
-
- // Attempting to acquire should throw
- await expect(pool.acquire()).rejects.toThrow('Pool is shutting down');
- });
-
- it('should reject waiting requests on shutdown', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 1, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Acquire the only connection
- const db = await pool.acquire();
-
- // Verify connection is acquired and functional
- expect(db).toBeDefined();
- const result = db.prepare('SELECT 1 as value').get() as { value: number };
- expect(result.value).toBe(1);
-
- // Try to acquire another (will wait)
- const acquirePromise = pool.acquire();
-
- // Wait for request to be queued
- await new Promise((resolve) => setTimeout(resolve, 10));
-
- // Shutdown pool
- await pool.shutdown();
-
- // Waiting request should be rejected
- await expect(acquirePromise).rejects.toThrow('Pool is shutting down');
- });
-
- it('should track totalAcquired and totalReleased correctly', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Perform multiple acquire/release cycles
- for (let i = 0; i < 10; i++) {
- const db = await pool.acquire();
- pool.release(db);
- }
-
- const stats = pool.getStats();
- expect(stats.totalAcquired).toBe(10);
- expect(stats.totalReleased).toBe(10);
-
- await pool.shutdown();
- });
- });
-
- /**
- * Test Suite 5: Error Handling
- *
- * Verifies proper error handling for various failure scenarios.
- */
- describe('Error Handling', () => {
- it('should timeout when no connection available within timeout', async () => {
- const minTimeout = 1000; // Minimum allowed connectionTimeout after clamping
- const pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 1,
- connectionTimeout: minTimeout,
- idleTimeout: 30000,
- });
-
- // Acquire the only connection and hold it
- const db = await pool.acquire();
-
- // Try to acquire another connection (should timeout)
- const startTime = Date.now();
-
- await expect(pool.acquire()).rejects.toThrow(/Connection acquisition timeout after \d+ms/);
-
- const elapsedTime = Date.now() - startTime;
-
- // Verify it timed out approximately at the right time (within 100ms tolerance)
- expect(elapsedTime).toBeGreaterThanOrEqual(minTimeout);
- expect(elapsedTime).toBeLessThan(minTimeout + 100);
-
- const stats = pool.getStats();
- expect(stats.timeoutErrors).toBe(1);
-
- pool.release(db);
- await pool.shutdown();
- });
-
- it('should handle invalid database path gracefully', async () => {
- const invalidPath = '/invalid/path/that/does/not/exist/test.db';
-
- await expect(
- ConnectionPool.create(invalidPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 })
- ).rejects.toThrow(); // Should throw during initialization
- });
-
- it('should handle release of unknown connection gracefully', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- // Create a connection outside the pool
- const externalDb = new Database(testDbPath);
-
- // Try to release it (should log error but not throw)
- expect(() => {
- pool.release(externalDb);
- }).not.toThrow();
-
- externalDb.close();
- await pool.shutdown();
- });
-
- it('should handle double release gracefully', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const db = await pool.acquire();
-
- // Release once
- pool.release(db);
-
- const statsAfterFirstRelease = pool.getStats();
-
- // Release again - ConnectionPool detects and ignores double release,
- // but totalReleased metric is incorrectly incremented (known limitation)
- pool.release(db);
-
- const statsAfterSecondRelease = pool.getStats();
-
- // After the fix in MAJOR-4, double release no longer increments totalReleased
- expect(statsAfterSecondRelease.totalReleased).toBe(1);
- expect(statsAfterSecondRelease.idle).toBe(statsAfterFirstRelease.idle);
-
- await pool.shutdown();
- });
-
- it('should provide clear error messages for timeout scenarios', async () => {
- // Use minimum allowed connectionTimeout (values below 1000ms are clamped to 1000ms)
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 1, connectionTimeout: 1000, idleTimeout: 30000 });
-
- const db = await pool.acquire();
-
- try {
- await pool.acquire();
- expect.fail('Should have thrown timeout error');
- } catch (error) {
- expect(error).toBeInstanceOf(Error);
- expect((error as Error).message).toMatch(/Connection acquisition timeout after \d+ms/);
- expect((error as Error).message).toContain('1000');
- }
-
- pool.release(db);
- await pool.shutdown();
- });
- });
-
- /**
- * Test Suite 6: SimpleDatabaseFactory Integration
- *
- * Tests integration between ConnectionPool and SimpleDatabaseFactory.
- */
- describe('SimpleDatabaseFactory Integration', () => {
- it('should create connection pool via SimpleDatabaseFactory.getPool()', async () => {
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
-
- expect(pool).toBeDefined();
- expect(pool.isHealthy()).toBe(true);
-
- const stats = pool.getStats();
- expect(stats.total).toBeGreaterThan(0);
- });
-
- it('should acquire and release pooled connections via factory methods', async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- expect(db).toBeDefined();
-
- // Use connection
- const result = db.prepare('SELECT COUNT(*) as count FROM users').get() as { count: number };
- expect(result.count).toBe(3);
-
- // Release connection
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
-
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats).toBeDefined();
- expect(stats!.active).toBe(0);
- });
-
- it('should return same pool instance for same path', async () => {
- const pool1 = await SimpleDatabaseFactory.getPool(testDbPath);
- const pool2 = await SimpleDatabaseFactory.getPool(testDbPath);
-
- expect(pool1).toBe(pool2);
- });
-
- it('should close all pools when closeAll() is called', async () => {
- // Create multiple pools
- const pool1 = await SimpleDatabaseFactory.getPool(testDbPath);
- const tempDb2 = join(tempDir, 'test2.db');
- new Database(tempDb2).close(); // Create empty db
- const pool2 = await SimpleDatabaseFactory.getPool(tempDb2);
-
- expect(pool1.isHealthy()).toBe(true);
- expect(pool2.isHealthy()).toBe(true);
-
- // Close all
- await SimpleDatabaseFactory.closeAll();
-
- // Pools should be shutdown
- await expect(pool1.acquire()).rejects.toThrow('Pool is shutting down');
- await expect(pool2.acquire()).rejects.toThrow('Pool is shutting down');
- });
- });
-
- /**
- * Test Suite 7: Performance Benchmarks
- *
- * Measures and verifies performance characteristics.
- */
- describe('Performance Benchmarks', () => {
- it('should handle high-throughput sequential queries efficiently', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const startTime = Date.now();
- const queryCount = 1000;
-
- for (let i = 0; i < queryCount; i++) {
- const db = await pool.acquire();
- db.prepare('SELECT * FROM users WHERE id = ?').get(1);
- pool.release(db);
- }
-
- const totalTime = Date.now() - startTime;
- const avgTime = totalTime / queryCount;
-
- // Should average less than 5ms per query (acquire + execute + release)
- expect(avgTime).toBeLessThan(5);
-
- await pool.shutdown();
- });
-
- it('should handle high-throughput parallel queries efficiently', async () => {
- const pool = await ConnectionPool.create(testDbPath, { maxConnections: 5, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const startTime = Date.now();
- const queryCount = 500;
-
- const queries = Array.from({ length: queryCount }, async () => {
- const db = await pool.acquire();
- const result = db.prepare('SELECT * FROM users WHERE id = ?').get(1);
- pool.release(db);
- return result;
- });
-
- const results = await Promise.all(queries);
-
- const totalTime = Date.now() - startTime;
-
- // All queries should complete successfully
- expect(results).toHaveLength(queryCount);
-
- // Should complete in reasonable time (less than 2 seconds for 500 queries)
- expect(totalTime).toBeLessThan(2000);
-
- await pool.shutdown();
- });
- });
-});
diff --git a/tests/integration/mcp-dynamic-resources.test.ts b/tests/integration/mcp-dynamic-resources.test.ts
deleted file mode 100644
index fb4e24c1..00000000
--- a/tests/integration/mcp-dynamic-resources.test.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-// tests/integration/mcp-dynamic-resources.test.ts
-import { describe, it, expect, beforeAll, afterAll } from 'vitest';
-import { ClaudeCodeBuddyMCPServer } from '../../src/mcp/server';
-
-describe('MCP Dynamic Resources Integration', () => {
- let server: ClaudeCodeBuddyMCPServer;
-
- beforeAll(async () => {
- server = await ClaudeCodeBuddyMCPServer.create();
- // Server auto-initializes via factory method
- });
-
- afterAll(async () => {
- // Server cleanup happens in global teardown
- // No public close() method needed for tests
- });
-
- it('should handle agent status resource request', async () => {
- const handler = (server as any).server._requestHandlers.get('resources/read');
- const result = await handler({
- method: 'resources/read',
- params: { uri: 'ccb://agent/code-reviewer/status' },
- });
-
- expect(result.contents).toHaveLength(1);
- expect(result.contents[0].mimeType).toBe('application/json');
-
- const data = JSON.parse(result.contents[0].text);
- expect(data.agentType).toBe('code-reviewer');
- expect(data.status).toBeDefined();
- });
-
- it('should handle task logs resource request', async () => {
- const handler = (server as any).server._requestHandlers.get('resources/read');
- const result = await handler({
- method: 'resources/read',
- params: { uri: 'ccb://task/task-123/logs' },
- });
-
- expect(result.contents).toHaveLength(1);
- expect(result.contents[0].mimeType).toBe('text/plain');
- // Feature not yet implemented - verify placeholder message
- expect(result.contents[0].text).toContain('Task Logs - Feature Not Yet Available');
- expect(result.contents[0].text).toContain('task-123');
- });
-});
diff --git a/tests/integration/mcp-sdk-1.25.3-features.test.ts b/tests/integration/mcp-sdk-1.25.3-features.test.ts
deleted file mode 100644
index db50f7bd..00000000
--- a/tests/integration/mcp-sdk-1.25.3-features.test.ts
+++ /dev/null
@@ -1,565 +0,0 @@
-/**
- * MCP SDK 1.25.3 Features Integration Test
- *
- * Comprehensive integration tests for new capabilities:
- * - URI Templates (Dynamic Resources)
- * - Progress Reporting
- * - Sampling (Content Generation)
- */
-
-import { describe, it, expect, beforeAll, afterAll } from 'vitest';
-import { ClaudeCodeBuddyMCPServer } from '../../src/mcp/server';
-import { URITemplateHandler } from '../../src/mcp/resources/URITemplateHandler';
-import { ResourceRegistry } from '../../src/mcp/resources/ResourceRegistry';
-import { AgentStatusHandler } from '../../src/mcp/resources/handlers/AgentStatusHandler';
-import { TaskLogsHandler } from '../../src/mcp/resources/handlers/TaskLogsHandler';
-import { ProgressReporter } from '../../src/mcp/ProgressReporter';
-import { SamplingClient } from '../../src/mcp/SamplingClient';
-import { TestGenerator } from '../../src/tools/TestGenerator';
-
-describe('MCP SDK 1.25.3 - URI Templates & Dynamic Resources', () => {
- let server: ClaudeCodeBuddyMCPServer;
-
- beforeAll(async () => {
- server = await ClaudeCodeBuddyMCPServer.create();
- });
-
- afterAll(async () => {
- // Server cleanup happens in global teardown
- });
-
- describe('URI Template Parsing', () => {
- let handler: URITemplateHandler;
-
- beforeAll(() => {
- handler = new URITemplateHandler();
- });
-
- it('should parse agent status URI template correctly', () => {
- const template = 'ccb://agent/{agentType}/status';
- const uri = 'ccb://agent/code-reviewer/status';
-
- const params = handler.parseTemplate(template, uri);
-
- expect(params).not.toBeNull();
- expect(params?.agentType).toBe('code-reviewer');
- });
-
- it('should parse task logs URI template correctly', () => {
- const template = 'ccb://task/{taskId}/logs';
- const uri = 'ccb://task/task-123/logs';
-
- const params = handler.parseTemplate(template, uri);
-
- expect(params).not.toBeNull();
- expect(params?.taskId).toBe('task-123');
- });
-
- it('should return null for non-matching URIs', () => {
- const template = 'ccb://agent/{agentType}/status';
- const uri = 'ccb://task/task-123/logs';
-
- const params = handler.parseTemplate(template, uri);
-
- expect(params).toBeNull();
- });
-
- it('should handle static URIs (no parameters)', () => {
- const template = 'ccb://system/health';
- const uri = 'ccb://system/health';
-
- const params = handler.parseTemplate(template, uri);
-
- expect(params).not.toBeNull();
- expect(Object.keys(params!)).toHaveLength(0);
- });
-
- it('should validate input parameters', () => {
- const handler = new URITemplateHandler();
-
- // Empty strings should return null
- expect(handler.parseTemplate('', 'uri')).toBeNull();
- expect(handler.parseTemplate('template', '')).toBeNull();
-
- // Non-string inputs should return null
- expect(handler.parseTemplate(null as any, 'uri')).toBeNull();
- expect(handler.parseTemplate('template', undefined as any)).toBeNull();
- });
-
- it('should handle special characters in URIs', () => {
- const template = 'ccb://agent/{agentType}/status';
- const uri = 'ccb://agent/test-writer/status';
-
- const params = handler.parseTemplate(template, uri);
-
- expect(params).not.toBeNull();
- expect(params?.agentType).toBe('test-writer');
- });
- });
-
- describe('Resource Registry', () => {
- let registry: ResourceRegistry;
-
- beforeAll(() => {
- registry = new ResourceRegistry();
- });
-
- it('should register and retrieve resource handlers', async () => {
- const template = 'ccb://test/{id}';
- const handler = async (params: any) => ({
- uri: `ccb://test/${params.id}`,
- mimeType: 'text/plain',
- text: `Test resource: ${params.id}`,
- });
-
- registry.register(template, handler);
- const result = await registry.handle('ccb://test/123');
-
- expect(result.uri).toBe('ccb://test/123');
- expect(result.mimeType).toBe('text/plain');
- expect(result.text).toContain('Test resource: 123');
- });
-
- it('should register and retrieve templates', () => {
- const template = {
- uriTemplate: 'ccb://test/{id}',
- name: 'Test Resource',
- description: 'A test resource',
- mimeType: 'text/plain',
- };
-
- registry.registerTemplate(template);
- const templates = registry.getTemplates();
-
- expect(templates).toContainEqual(template);
- });
-
- it('should throw NotFoundError for unregistered URIs', async () => {
- const registry = new ResourceRegistry();
-
- await expect(
- registry.handle('ccb://unknown/resource')
- ).rejects.toThrow('No handler found for URI');
- });
- });
-
- describe('Agent Status Handler', () => {
- let handler: AgentStatusHandler;
-
- beforeAll(() => {
- handler = new AgentStatusHandler();
- });
-
- it('should return agent status for valid agent types', async () => {
- const result = await handler.handle({ agentType: 'code-reviewer' });
-
- expect(result.uri).toBe('ccb://agent/code-reviewer/status');
- expect(result.mimeType).toBe('application/json');
-
- const status = JSON.parse(result.text);
- expect(status.agentType).toBe('code-reviewer');
- expect(status.status).toBe('active');
- expect(status.capabilities).toContain('review');
- expect(status.lastActive).toBeDefined();
- });
-
- it('should return capabilities for all agent types', async () => {
- const agentTypes = [
- 'code-reviewer',
- 'test-writer',
- 'development-butler',
- 'e2e-healing',
- 'knowledge-graph',
- ];
-
- for (const agentType of agentTypes) {
- const result = await handler.handle({ agentType });
- const status = JSON.parse(result.text);
-
- expect(status.capabilities).toBeDefined();
- expect(Array.isArray(status.capabilities)).toBe(true);
- expect(status.capabilities.length).toBeGreaterThan(0);
- }
- });
-
- it('should throw NotFoundError for invalid agent types', async () => {
- await expect(
- handler.handle({ agentType: 'invalid-agent' })
- ).rejects.toThrow('Unknown agent type');
- });
- });
-
- describe('Task Logs Handler', () => {
- let handler: TaskLogsHandler;
-
- beforeAll(() => {
- handler = new TaskLogsHandler();
- });
-
- it('should return task logs placeholder for valid task IDs', async () => {
- const result = await handler.handle({ taskId: 'task-123' });
-
- expect(result.uri).toBe('ccb://task/task-123/logs');
- expect(result.mimeType).toBe('text/plain');
- // Feature not yet implemented - verify placeholder message
- expect(result.text).toContain('Task Logs - Feature Not Yet Available');
- expect(result.text).toContain('task-123');
- });
-
- it('should return placeholder for all task IDs (not yet implemented)', async () => {
- const result = await handler.handle({ taskId: 'invalid-task-id' });
-
- // Current implementation returns placeholder for all tasks
- expect(result.text).toContain('Task Logs - Feature Not Yet Available');
- expect(result.text).toContain('invalid-task-id');
- });
- });
-
- describe('MCP Server Integration', () => {
- it('should handle agent status resource request via server', async () => {
- const handler = (server as any).server._requestHandlers.get('resources/read');
- const result = await handler({
- method: 'resources/read',
- params: { uri: 'ccb://agent/test-writer/status' },
- });
-
- expect(result.contents).toHaveLength(1);
- expect(result.contents[0].mimeType).toBe('application/json');
-
- const data = JSON.parse(result.contents[0].text);
- expect(data.agentType).toBe('test-writer');
- expect(data.status).toBe('active');
- expect(data.capabilities).toContain('generate-tests');
- });
-
- it('should handle task logs resource request via server', async () => {
- const handler = (server as any).server._requestHandlers.get('resources/read');
- const result = await handler({
- method: 'resources/read',
- params: { uri: 'ccb://task/task-456/logs' },
- });
-
- expect(result.contents).toHaveLength(1);
- expect(result.contents[0].mimeType).toBe('text/plain');
- // Feature not yet implemented - verify placeholder message
- expect(result.contents[0].text).toContain('Task Logs - Feature Not Yet Available');
- });
-
- it('should list available resource templates', async () => {
- const handler = (server as any).server._requestHandlers.get('resources/list');
- const result = await handler({
- method: 'resources/list',
- params: {},
- });
-
- expect(result.resources).toBeDefined();
- expect(Array.isArray(result.resources)).toBe(true);
-
- // Should include our dynamic resource templates
- const agentTemplate = result.resources.find((r: any) =>
- r.uriTemplate === 'ccb://agent/{agentType}/status'
- );
- const taskTemplate = result.resources.find((r: any) =>
- r.uriTemplate === 'ccb://task/{taskId}/logs'
- );
-
- expect(agentTemplate).toBeDefined();
- expect(taskTemplate).toBeDefined();
- });
- });
-});
-
-describe('MCP SDK 1.25.3 - Progress Reporting', () => {
- describe('ProgressReporter', () => {
- it('should report progress when token is provided', async () => {
- let capturedUpdate: any = null;
-
- const sendProgress = async (update: any) => {
- capturedUpdate = update;
- };
-
- const reporter = new ProgressReporter('test-token', sendProgress);
-
- await reporter.report(5, 10);
-
- expect(capturedUpdate).not.toBeNull();
- expect(capturedUpdate.progressToken).toBe('test-token');
- expect(capturedUpdate.progress).toBe(5);
- expect(capturedUpdate.total).toBe(10);
- });
-
- it('should skip reporting when no token is provided', async () => {
- let callCount = 0;
-
- const sendProgress = async () => {
- callCount++;
- };
-
- const reporter = new ProgressReporter(undefined, sendProgress);
-
- await reporter.report(5, 10);
-
- expect(callCount).toBe(0);
- });
-
- it('should indicate if progress reporting is enabled', () => {
- const reporter1 = new ProgressReporter('token', async () => {});
- const reporter2 = new ProgressReporter(undefined, async () => {});
-
- expect(reporter1.isEnabled()).toBe(true);
- expect(reporter2.isEnabled()).toBe(false);
- });
-
- it('should handle multiple progress updates', async () => {
- const updates: any[] = [];
-
- const sendProgress = async (update: any) => {
- updates.push(update);
- };
-
- const reporter = new ProgressReporter('test-token', sendProgress);
-
- await reporter.report(1, 10);
- await reporter.report(5, 10);
- await reporter.report(10, 10);
-
- expect(updates).toHaveLength(3);
- expect(updates[0].progress).toBe(1);
- expect(updates[1].progress).toBe(5);
- expect(updates[2].progress).toBe(10);
- });
- });
-});
-
-describe('MCP SDK 1.25.3 - Sampling & Content Generation', () => {
- describe('SamplingClient', () => {
- it('should generate content from prompt', async () => {
- const mockSampleFn = async (request: any) => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'Generated content' },
- });
-
- const client = new SamplingClient(mockSampleFn);
- const result = await client.generate('Test prompt', { maxTokens: 100 });
-
- expect(result).toBe('Generated content');
- });
-
- it('should validate prompt input', async () => {
- const mockSampleFn = async () => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'test' },
- });
-
- const client = new SamplingClient(mockSampleFn);
-
- await expect(
- client.generate('', { maxTokens: 100 })
- ).rejects.toThrow('Prompt cannot be empty');
-
- await expect(
- client.generate(' ', { maxTokens: 100 })
- ).rejects.toThrow('Prompt cannot be empty');
- });
-
- it('should validate maxTokens parameter', async () => {
- const mockSampleFn = async () => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'test' },
- });
-
- const client = new SamplingClient(mockSampleFn);
-
- await expect(
- client.generate('Test', { maxTokens: 0 })
- ).rejects.toThrow('maxTokens must be positive');
-
- await expect(
- client.generate('Test', { maxTokens: -1 })
- ).rejects.toThrow('maxTokens must be positive');
- });
-
- it('should handle conversation history', async () => {
- let capturedMessages: any[] = [];
-
- const mockSampleFn = async (request: any) => {
- capturedMessages = request.messages;
- return {
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'Response' },
- };
- };
-
- const client = new SamplingClient(mockSampleFn);
- const messages = [
- { role: 'user' as const, content: 'Hello' },
- { role: 'assistant' as const, content: 'Hi there' },
- { role: 'user' as const, content: 'How are you?' },
- ];
-
- await client.generateWithHistory(messages, { maxTokens: 100 });
-
- expect(capturedMessages).toEqual(messages);
- });
-
- it('should handle sampling errors gracefully', async () => {
- const mockSampleFn = async () => {
- throw new Error('Network error');
- };
-
- const client = new SamplingClient(mockSampleFn);
-
- await expect(
- client.generate('Test', { maxTokens: 100 })
- ).rejects.toThrow('Sampling failed: Network error');
- });
-
- it('should validate response structure', async () => {
- const mockSampleFn = async () => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: '' }, // Empty text
- });
-
- const client = new SamplingClient(mockSampleFn);
-
- await expect(
- client.generate('Test', { maxTokens: 100 })
- ).rejects.toThrow('Invalid response from sampling function');
- });
- });
-
- describe('TestGenerator', () => {
- it('should generate tests from specification', async () => {
- const mockSampleFn = async (request: any) => ({
- role: 'assistant' as const,
- content: {
- type: 'text' as const,
- text: `describe('Calculator', () => {
- it('should add numbers', () => {
- expect(add(1, 2)).toBe(3);
- });
-});`,
- },
- });
-
- const client = new SamplingClient(mockSampleFn);
- const generator = new TestGenerator(client);
-
- const spec = 'Calculator with add function';
- const result = await generator.generateTests(spec);
-
- expect(result).toContain('describe');
- expect(result).toContain('it(');
- expect(result).toContain('expect');
- });
-
- it('should generate tests from source code', async () => {
- const mockSampleFn = async (request: any) => ({
- role: 'assistant' as const,
- content: {
- type: 'text' as const,
- text: `describe('multiply', () => {
- it('should multiply two numbers', () => {
- expect(multiply(2, 3)).toBe(6);
- });
-});`,
- },
- });
-
- const client = new SamplingClient(mockSampleFn);
- const generator = new TestGenerator(client);
-
- const code = `
-export function multiply(a: number, b: number): number {
- return a * b;
-}
-`;
- const result = await generator.generateTestsFromCode(code);
-
- expect(result).toContain('describe');
- expect(result).toContain('multiply');
- expect(result).toContain('expect');
- });
-
- it('should validate specification input', async () => {
- const mockSampleFn = async () => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'test' },
- });
-
- const client = new SamplingClient(mockSampleFn);
- const generator = new TestGenerator(client);
-
- await expect(
- generator.generateTests('')
- ).rejects.toThrow('Specification cannot be empty');
- });
-
- it('should validate code input', async () => {
- const mockSampleFn = async () => ({
- role: 'assistant' as const,
- content: { type: 'text' as const, text: 'test' },
- });
-
- const client = new SamplingClient(mockSampleFn);
- const generator = new TestGenerator(client);
-
- await expect(
- generator.generateTestsFromCode('')
- ).rejects.toThrow('Code cannot be empty');
- });
- });
-
- describe('generate-tests MCP Tool', () => {
- it('should be available in MCP server tool list', async () => {
- const server = await ClaudeCodeBuddyMCPServer.create();
- const handler = (server as any).server._requestHandlers.get('tools/list');
- const result = await handler({ method: 'tools/list', params: {} });
-
- // v2.8.0: Tool renamed to memesh-generate-tests
- const generateTestsTool = result.tools.find((t: any) => t.name === 'memesh-generate-tests');
-
- expect(generateTestsTool).toBeDefined();
- expect(generateTestsTool.description).toContain('test');
- expect(generateTestsTool.inputSchema).toBeDefined();
- });
- });
-});
-
-describe('MCP SDK 1.25.3 - End-to-End Workflows', () => {
- it('should complete full workflow: resource discovery -> read -> content generation', async () => {
- const server = await ClaudeCodeBuddyMCPServer.create();
-
- // Step 1: List available resources
- const listHandler = (server as any).server._requestHandlers.get('resources/list');
- const listResult = await listHandler({ method: 'resources/list', params: {} });
-
- expect(listResult.resources.length).toBeGreaterThan(0);
-
- // Step 2: Read a specific resource
- const readHandler = (server as any).server._requestHandlers.get('resources/read');
- const readResult = await readHandler({
- method: 'resources/read',
- params: { uri: 'ccb://agent/code-reviewer/status' },
- });
-
- expect(readResult.contents[0].text).toBeDefined();
-
- // Step 3: Use the data for content generation (simulated)
- const agentStatus = JSON.parse(readResult.contents[0].text);
- expect(agentStatus.capabilities).toBeDefined();
- });
-
- it('should handle error scenarios gracefully', async () => {
- const server = await ClaudeCodeBuddyMCPServer.create();
- const readHandler = (server as any).server._requestHandlers.get('resources/read');
-
- // Invalid URI should throw
- await expect(
- readHandler({
- method: 'resources/read',
- params: { uri: 'ccb://invalid/resource' },
- })
- ).rejects.toThrow();
- });
-});
diff --git a/tests/integration/memory-complete.test.ts b/tests/integration/memory-complete.test.ts
deleted file mode 100644
index 0d14ed06..00000000
--- a/tests/integration/memory-complete.test.ts
+++ /dev/null
@@ -1,592 +0,0 @@
-/**
- * Complete Memory System Integration Tests
- *
- * Comprehensive end-to-end testing of all memory features:
- * - UnifiedMemoryStore with all CRUD operations
- * - SmartMemoryQuery with context-aware ranking
- * - AutoTagger with 50+ technology detection
- * - AutoMemoryRecorder with event detection
- * - Memory ID validation
- * - ESCAPE clause optimization
- * - Metadata size limits
- *
- * Tests cover complete workflows and feature interactions.
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { UnifiedMemoryStore } from '../../src/memory/UnifiedMemoryStore.js';
-import { SmartMemoryQuery } from '../../src/memory/SmartMemoryQuery.js';
-import { AutoTagger } from '../../src/memory/AutoTagger.js';
-import { AutoMemoryRecorder } from '../../src/memory/AutoMemoryRecorder.js';
-import { KnowledgeGraph } from '../../src/knowledge-graph/index.js';
-import type { UnifiedMemory } from '../../src/memory/types/unified-memory.js';
-import { mkdtempSync, rmSync } from 'fs';
-import { join } from 'path';
-import { tmpdir } from 'os';
-
-describe('Memory System Integration', () => {
- let kg: KnowledgeGraph;
- let memoryStore: UnifiedMemoryStore;
- let smartQuery: SmartMemoryQuery;
- let autoTagger: AutoTagger;
- let autoRecorder: AutoMemoryRecorder;
- let tempDir: string;
-
- beforeEach(async () => {
- // Create temp directory for test database
- tempDir = mkdtempSync(join(tmpdir(), 'memory-integration-'));
- const dbPath = join(tempDir, 'test-kg.db');
-
- // Initialize all components
- kg = await KnowledgeGraph.create(dbPath);
- memoryStore = new UnifiedMemoryStore(kg);
- smartQuery = new SmartMemoryQuery();
- autoTagger = new AutoTagger();
- autoRecorder = new AutoMemoryRecorder(memoryStore);
- });
-
- afterEach(() => {
- kg.close();
- rmSync(tempDir, { recursive: true, force: true });
- });
-
- describe('Complete Memory Workflow', () => {
- it('should handle full memory lifecycle with auto-features', async () => {
- // Step 1: Create memory with auto-tagging
- const content = 'Implemented JWT authentication using TypeScript and Express middleware for secure API access';
- const manualTags = ['security', 'backend'];
-
- // Auto-tagger enhances tags
- const enhancedTags = autoTagger.generateTags(content, manualTags);
-
- // Should detect: typescript, express, backend, api (with prefixes)
- expect(enhancedTags).toContain('tech:typescript');
- expect(enhancedTags).toContain('tech:express');
- expect(enhancedTags).toContain('domain:backend'); // "middleware" triggers backend
- // Manual tags are preserved
- expect(enhancedTags).toContain('security');
- expect(enhancedTags).toContain('backend');
-
- // Step 2: Store memory
- const memory: UnifiedMemory = {
- type: 'knowledge',
- content,
- tags: enhancedTags,
- importance: 0.85,
- timestamp: new Date(),
- metadata: {
- implementation: 'middleware',
- algorithm: 'RS256'
- }
- };
-
- const id = await memoryStore.store(memory, {
- projectPath: '/backend/auth'
- });
-
- expect(id).toBeDefined();
-
- // Step 3: Retrieve and verify
- const retrieved = await memoryStore.get(id);
- expect(retrieved).not.toBeNull();
- expect(retrieved!.content).toBe(content);
- expect(retrieved!.tags).toContain('tech:typescript');
- expect(retrieved!.importance).toBe(0.85);
-
- // Step 4: Search with SmartMemoryQuery
- const allMemories = await memoryStore.search('', { techStack: [] });
- const searchResults = smartQuery.search('JWT authentication', allMemories, {
- techStack: ['typescript', 'nodejs']
- });
-
- expect(searchResults.length).toBeGreaterThan(0);
- expect(searchResults[0].id).toBe(id);
-
- // Step 5: Update memory
- await memoryStore.update(id, {
- importance: 0.9,
- metadata: {
- ...retrieved!.metadata,
- tested: true,
- coverage: 95
- }
- });
-
- const updated = await memoryStore.get(id);
- expect(updated!.importance).toBe(0.9);
- expect(updated!.metadata!.tested).toBe(true);
-
- // Step 6: Delete memory
- await memoryStore.delete(id);
- const deleted = await memoryStore.get(id);
- expect(deleted).toBeNull();
- });
-
- it('should handle auto-memory recording workflow', async () => {
- // Simulate code change event
- const codeChangeId = await autoRecorder.recordCodeChange({
- files: ['auth.ts', 'user.ts', 'session.ts', 'middleware.ts'],
- linesChanged: 120,
- description: 'Refactor authentication system to use JWT tokens',
- projectPath: '/backend'
- });
-
- expect(codeChangeId).not.toBeNull();
-
- // Verify auto-recorded memory
- const codeMemory = await memoryStore.get(codeChangeId!);
- expect(codeMemory).not.toBeNull();
- expect(codeMemory!.tags).toContain('auto-recorded');
- expect(codeMemory!.tags).toContain('code-change');
- expect(codeMemory!.importance).toBeGreaterThanOrEqual(0.6);
-
- // Simulate test failure event
- const testFailId = await autoRecorder.recordTestEvent({
- type: 'fail',
- testName: 'should validate JWT signature',
- error: 'Invalid signature algorithm',
- projectPath: '/backend/tests'
- });
-
- expect(testFailId).not.toBeNull();
-
- // Verify test failure recorded with high importance
- const testMemory = await memoryStore.get(testFailId!);
- expect(testMemory!.type).toBe('mistake');
- expect(testMemory!.importance).toBe(0.9);
- expect(testMemory!.tags).toContain('test');
- expect(testMemory!.tags).toContain('failure');
-
- // Simulate git commit
- const commitId = await autoRecorder.recordGitCommit({
- message: 'feat: implement JWT authentication with refresh tokens',
- filesChanged: 8,
- insertions: 250,
- deletions: 30,
- projectPath: '/backend'
- });
-
- expect(commitId).not.toBeNull();
-
- // Verify commit recorded
- const commitMemory = await memoryStore.get(commitId!);
- expect(commitMemory!.type).toBe('decision');
- expect(commitMemory!.tags).toContain('git');
- expect(commitMemory!.tags).toContain('commit');
-
- // Search all auto-recorded memories
- const autoMemories = await memoryStore.searchByTags(['auto-recorded']);
- expect(autoMemories.length).toBe(3);
- });
- });
-
- describe('Search and Ranking Integration', () => {
- it('should rank memories correctly with multiple factors', async () => {
- const now = new Date();
- const lastWeek = new Date(now.getTime() - 7 * 24 * 60 * 60 * 1000);
- const lastMonth = new Date(now.getTime() - 30 * 24 * 60 * 60 * 1000);
-
- // Create test memories with varying importance, recency, and relevance
- const memories: UnifiedMemory[] = [
- {
- type: 'knowledge',
- content: 'TypeScript strict mode best practices for type safety',
- tags: ['typescript', 'best-practice', 'type-safety'],
- importance: 0.9,
- timestamp: now, // Recent
- },
- {
- type: 'mistake',
- content: 'Security vulnerability in user authentication endpoint',
- tags: ['security', 'authentication', 'vulnerability'],
- importance: 0.95, // Highest importance
- timestamp: lastWeek,
- },
- {
- type: 'decision',
- content: 'Chose PostgreSQL over MySQL for ACID guarantees',
- tags: ['database', 'postgresql', 'architecture'],
- importance: 0.8,
- timestamp: lastMonth, // Older
- },
- {
- type: 'knowledge',
- content: 'React hooks patterns for state management',
- tags: ['react', 'hooks', 'frontend'],
- importance: 0.7,
- timestamp: now,
- },
- ];
-
- // Store all memories
- const ids: string[] = [];
- for (const memory of memories) {
- const id = await memoryStore.store(memory);
- ids.push(id);
- }
-
- // Test 1: Search for "typescript" with tech stack boost
- const tsResults = smartQuery.search('typescript best practices', memories, {
- techStack: ['typescript', 'nodejs']
- });
-
- // First result should be TypeScript memory (exact match + recent + tech boost)
- expect(tsResults[0].content).toContain('TypeScript strict mode');
-
- // Test 2: Search for "security" - importance should dominate
- const securityResults = smartQuery.search('security authentication', memories, {
- techStack: []
- });
-
- // Security vulnerability should rank first (highest importance 0.95)
- expect(securityResults[0].content).toContain('Security vulnerability');
-
- // Test 3: Filter by type
- const decisions = await memoryStore.searchByType('decision', '');
- expect(decisions.length).toBe(1);
- expect(decisions[0].content).toContain('PostgreSQL');
-
- // Test 4: Search by tags
- const authMemories = await memoryStore.searchByTags(['authentication']);
- expect(authMemories.length).toBeGreaterThan(0);
- expect(authMemories[0].tags).toContain('authentication');
- });
-
- it('should handle complex multi-keyword searches', async () => {
- // Create memories about different tech stacks
- const memories: UnifiedMemory[] = [
- {
- type: 'knowledge',
- content: 'React state management using Redux Toolkit with TypeScript',
- tags: ['react', 'redux', 'typescript', 'frontend'],
- importance: 0.9, // Higher importance for React
- timestamp: new Date(),
- },
- {
- type: 'knowledge',
- content: 'Vue 3 Composition API with TypeScript for reactive state',
- tags: ['vue', 'typescript', 'frontend'],
- importance: 0.75,
- timestamp: new Date(),
- },
- {
- type: 'knowledge',
- content: 'Angular services with TypeScript dependency injection',
- tags: ['angular', 'typescript', 'frontend'],
- importance: 0.7,
- timestamp: new Date(),
- },
- ];
-
- for (const memory of memories) {
- await memoryStore.store(memory);
- }
-
- // Search with React tech stack
- const allMemories = await memoryStore.search('', { techStack: [] });
- const reactResults = smartQuery.search('typescript state management', allMemories, {
- techStack: ['react', 'typescript']
- });
-
- // React + Redux should be in top results (tech stack boost)
- expect(reactResults.length).toBeGreaterThan(0);
- const hasReactMemory = reactResults.some(r => r.content.includes('React state management'));
- expect(hasReactMemory).toBe(true);
-
- // Search with Vue tech stack
- const vueResults = smartQuery.search('typescript state', allMemories, {
- techStack: ['vue', 'typescript']
- });
-
- // Vue should be in results
- expect(vueResults.length).toBeGreaterThan(0);
- const hasVueMemory = vueResults.some(r => r.content.includes('Vue 3 Composition'));
- expect(hasVueMemory).toBe(true);
- });
- });
-
- describe('Auto-Tagging Integration', () => {
- it('should detect technologies across different content types', async () => {
- // Test 1: Backend tech
- const backendTags = autoTagger.generateTags(
- 'Built REST API with Express and PostgreSQL database',
- []
- );
- expect(backendTags).toContain('tech:express');
- expect(backendTags).toContain('tech:postgresql');
- expect(backendTags.some(t => t.startsWith('domain:'))).toBe(true);
-
- // Test 2: Frontend tech
- const frontendTags = autoTagger.generateTags(
- 'Created UI using React and TypeScript',
- []
- );
- expect(frontendTags).toContain('tech:react');
- expect(frontendTags).toContain('tech:typescript');
- expect(frontendTags).toContain('domain:frontend');
-
- // Test 3: DevOps tech
- const devopsTags = autoTagger.generateTags(
- 'Deployed with AWS and Docker containers',
- []
- );
- expect(devopsTags).toContain('tech:aws');
- expect(devopsTags).toContain('tech:docker');
- });
-
- it('should preserve manual tags and add auto-detected tags', async () => {
- const content = 'Implemented caching layer using Redis for session storage';
- const manualTags = ['custom-feature', 'production-ready'];
-
- const enhancedTags = autoTagger.generateTags(content, manualTags);
-
- // Manual tags preserved
- expect(enhancedTags).toContain('custom-feature');
- expect(enhancedTags).toContain('production-ready');
-
- // Auto-detected tags added (with prefixes)
- expect(enhancedTags).toContain('tech:redis');
- expect(enhancedTags).toContain('domain:performance'); // caching is in performance domain
- expect(enhancedTags).toContain('domain:auth'); // session is in auth domain
- });
- });
-
- describe('Memory ID Validation Integration', () => {
- it('should reject invalid memory IDs without correct prefix', async () => {
- const memory: UnifiedMemory = {
- id: 'invalid-id-format',
- type: 'knowledge',
- content: 'Test memory',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- };
-
- await expect(memoryStore.store(memory)).rejects.toThrow('Memory ID must start with prefix');
- });
-
- it('should accept memory IDs with correct prefix', async () => {
- const memory: UnifiedMemory = {
- id: 'unified-memory-550e8400-e29b-41d4-a716-446655440000',
- type: 'knowledge',
- content: 'Test memory with valid UUID',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- };
-
- const id = await memoryStore.store(memory);
- expect(id).toBe('unified-memory-550e8400-e29b-41d4-a716-446655440000');
- });
- });
-
- describe('Metadata Size Validation Integration', () => {
- it('should reject oversized metadata (> 1MB)', async () => {
- const largeData = 'x'.repeat(1.1 * 1024 * 1024); // 1.1MB
-
- const memory: UnifiedMemory = {
- type: 'knowledge',
- content: 'Test memory',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- metadata: {
- largeField: largeData,
- },
- };
-
- await expect(memoryStore.store(memory)).rejects.toThrow('exceeds limit');
- });
-
- it('should accept metadata within size limits (< 1MB)', async () => {
- const okData = 'x'.repeat(900 * 1024); // 900KB
-
- const memory: UnifiedMemory = {
- type: 'knowledge',
- content: 'Test memory',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- metadata: {
- okField: okData,
- },
- };
-
- const id = await memoryStore.store(memory);
- expect(id).toBeDefined();
-
- const retrieved = await memoryStore.get(id);
- expect(retrieved!.metadata!.okField).toBe(okData);
- });
- });
-
- describe('ESCAPE Clause Integration', () => {
- it('should handle special characters in search with new ESCAPE clause', async () => {
- // Store memories with special characters
- const memories = [
- { content: 'Database migration_v1 completed', tags: ['database'] },
- { content: 'Test_case for user%authentication', tags: ['test'] },
- { content: 'API endpoint /users/:id implementation', tags: ['api'] },
- ];
-
- for (const mem of memories) {
- await memoryStore.store({
- type: 'knowledge',
- ...mem,
- importance: 0.5,
- timestamp: new Date(),
- });
- }
-
- // Search with underscore (should work with new ESCAPE '!')
- const results = await memoryStore.search('migration_v1', { techStack: [] });
- expect(results.length).toBeGreaterThan(0);
- expect(results[0].content).toContain('migration_v1');
- });
- });
-
- describe('Error Handling Integration', () => {
- it('should handle database errors gracefully', async () => {
- // Close database to simulate error
- kg.close();
-
- const memory: UnifiedMemory = {
- type: 'knowledge',
- content: 'Test memory',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- };
-
- // Should throw meaningful error
- await expect(memoryStore.store(memory)).rejects.toThrow();
- });
-
- it('should handle invalid memory types gracefully', async () => {
- const memory: any = {
- type: 'invalid-type',
- content: 'Test memory',
- tags: ['test'],
- importance: 0.5,
- timestamp: new Date(),
- };
-
- await expect(memoryStore.store(memory)).rejects.toThrow();
- });
- });
-
- describe('Performance Integration', () => {
- it('should handle bulk operations efficiently', async () => {
- const startTime = Date.now();
-
- // Store 100 memories
- const ids: string[] = [];
- for (let i = 0; i < 100; i++) {
- const memory: UnifiedMemory = {
- type: 'knowledge',
- content: `Test memory ${i} with TypeScript and React`,
- tags: ['test', 'bulk'],
- importance: 0.5,
- timestamp: new Date(),
- };
-
- const id = await memoryStore.store(memory);
- ids.push(id);
- }
-
- const storeTime = Date.now() - startTime;
- console.log(`Stored 100 memories in ${storeTime}ms`);
-
- // Should complete in reasonable time (< 5 seconds)
- expect(storeTime).toBeLessThan(5000);
-
- // Search should also be fast
- const searchStart = Date.now();
- const results = await memoryStore.search('TypeScript', {
- techStack: [],
- limit: 100, // Explicitly request 100 results (default is 50)
- });
- const searchTime = Date.now() - searchStart;
-
- console.log(`Searched ${results.length} memories in ${searchTime}ms`);
- expect(searchTime).toBeLessThan(1000);
- expect(results.length).toBe(100);
- });
- });
-
- describe('Real-World Scenarios', () => {
- it('should support a complete development session workflow', async () => {
- // Scenario: Developer implements a new feature
-
- // 1. Check existing knowledge (empty at start of session)
- const existingAuth = await memoryStore.search('authentication patterns', {
- techStack: ['typescript', 'nodejs']
- });
-
- // At the start of the workflow, no authentication memories exist yet
- // This verifies the search returns empty results gracefully
- expect(Array.isArray(existingAuth)).toBe(true);
-
- // 2. Auto-record code changes
- await autoRecorder.recordCodeChange({
- files: ['auth-service.ts', 'user-controller.ts', 'middleware/auth.ts'],
- linesChanged: 85,
- description: 'Implement two-factor authentication',
- projectPath: '/backend/auth'
- });
-
- // 3. Store implementation decision
- await memoryStore.store({
- type: 'decision',
- content: 'Chose TOTP (Time-based OTP) over SMS for 2FA due to better security and cost',
- tags: ['security', '2fa', 'authentication', 'totp'],
- importance: 0.85,
- timestamp: new Date(),
- metadata: {
- library: 'speakeasy',
- qrCodeGenerator: 'qrcode'
- }
- });
-
- // 4. Record test failure
- await autoRecorder.recordTestEvent({
- type: 'fail',
- testName: 'should verify TOTP token correctly',
- error: 'Token validation fails for valid tokens',
- projectPath: '/backend/tests/auth'
- });
-
- // 5. Store bug fix
- await memoryStore.store({
- type: 'mistake',
- content: 'TOTP validation failed due to clock skew - added 30s window tolerance',
- tags: ['bug-fix', '2fa', 'totp'],
- importance: 0.75,
- timestamp: new Date(),
- metadata: {
- issue: 'clock-skew',
- solution: 'Added ±1 window tolerance'
- }
- });
-
- // 6. Record successful commit
- await autoRecorder.recordGitCommit({
- message: 'feat: implement TOTP-based two-factor authentication',
- filesChanged: 6,
- insertions: 180,
- deletions: 20,
- projectPath: '/backend/auth'
- });
-
- // Verify complete session recorded
- const sessionMemories = await memoryStore.searchByTags(['auto-recorded']);
- expect(sessionMemories.length).toBeGreaterThanOrEqual(2); // code change + commit
-
- // Future developer can search and learn
- const twoFactorKnowledge = await memoryStore.search('two-factor authentication', {
- techStack: ['typescript', 'nodejs']
- });
- expect(twoFactorKnowledge.length).toBeGreaterThan(0);
- });
- });
-});
diff --git a/tests/integrations/session-memory/SessionContextInjector.test.ts b/tests/integrations/session-memory/SessionContextInjector.test.ts
deleted file mode 100644
index 5fc5d41a..00000000
--- a/tests/integrations/session-memory/SessionContextInjector.test.ts
+++ /dev/null
@@ -1,712 +0,0 @@
-import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
-import { SessionContextInjector } from '../../../src/integrations/session-memory/SessionContextInjector.js';
-import type { Entity, EntityType } from '../../../src/knowledge-graph/types.js';
-import { AUTO_TAGS } from '../../../src/integrations/session-memory/types.js';
-
-// ─── Test Helpers ────────────────────────────────────────────────────
-
-/**
- * Create a mock KnowledgeGraph with methods needed by SessionContextInjector.
- */
-function createMockKnowledgeGraph(): {
- searchEntities: Mock;
- getEntity: Mock;
-} {
- return {
- searchEntities: vi.fn(() => []),
- getEntity: vi.fn(() => null),
- };
-}
-
-/**
- * Create a realistic Entity for testing.
- * Mimics entities ingested from native session memory via SessionMemoryIngester.
- */
-function createEntity(
- overrides: Partial & { name: string; entityType: EntityType },
-): Entity {
- return {
- id: Math.floor(Math.random() * 10000),
- observations: [],
- tags: [AUTO_TAGS.SOURCE, AUTO_TAGS.AUTO_INGESTED],
- metadata: {},
- createdAt: new Date('2025-02-01T10:00:00Z'),
- ...overrides,
- };
-}
-
-// ─── Tests ───────────────────────────────────────────────────────────
-
-describe('SessionContextInjector', () => {
- let mockKG: ReturnType;
- let injector: SessionContextInjector;
-
- beforeEach(() => {
- mockKG = createMockKnowledgeGraph();
- injector = new SessionContextInjector(
- mockKG as never,
- { maxItemsPerSection: 5, maxOutputChars: 4000 },
- );
- });
-
- // ── 1. Empty knowledge graph ──────────────────────────────────────
-
- describe('empty knowledge graph', () => {
- it('returns empty string when no relevant entities exist', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- const result = injector.generateContext();
-
- expect(result).toBe('');
- });
-
- it('returns empty string when context is provided but no entities', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- const result = injector.generateContext({
- projectPath: '/some/project',
- gitBranch: 'feature/test',
- });
-
- expect(result).toBe('');
- });
- });
-
- // ── 2. Lessons injection ──────────────────────────────────────────
-
- describe('lessons injection', () => {
- it('finds lesson_learned entities with source tag and formats them', () => {
- const lessons: Entity[] = [
- createEntity({
- name: 'lesson-sqlite-migration',
- entityType: 'lesson_learned',
- observations: [
- 'Error: ALTER TABLE failed for SQLite',
- 'Correction: Check PRAGMA table_info before adding columns',
- 'source_session: abc123',
- ],
- }),
- createEntity({
- name: 'lesson-async-cleanup',
- entityType: 'lesson_learned',
- observations: [
- 'Error: Race condition in cleanup',
- 'Correction: Use cleanupInProgress flag to guard concurrent ops',
- ],
- }),
- ];
-
- // Return lessons for lesson_learned query, empty for everything else
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return lessons;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Past Lessons');
- expect(result).toContain('ALTER TABLE failed for SQLite');
- expect(result).toContain('Race condition in cleanup');
- });
-
- it('queries with correct entityType and tag for lessons', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- injector.generateContext();
-
- // Verify at least one call was made for lesson_learned with source tag
- const lessonCall = mockKG.searchEntities.mock.calls.find(
- (call: [{ entityType?: EntityType; tag?: string }]) =>
- call[0].entityType === 'lesson_learned' && call[0].tag === AUTO_TAGS.SOURCE,
- );
- expect(lessonCall).toBeDefined();
- });
- });
-
- // ── 3. Best practices injection ───────────────────────────────────
-
- describe('best practices injection', () => {
- it('finds best_practice entities and formats them', () => {
- const practices: Entity[] = [
- createEntity({
- name: 'practice-use-content-hash',
- entityType: 'best_practice',
- observations: [
- 'Use content hash for database-level deduplication in KnowledgeGraph',
- ],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'best_practice' && query.tag === AUTO_TAGS.SOURCE) {
- return practices;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Best Practices');
- expect(result).toContain('content hash for database-level deduplication');
- });
- });
-
- // ── 4. Prevention rules injection ─────────────────────────────────
-
- describe('prevention rules injection', () => {
- it('finds prevention_rule entities and formats them with warning emphasis', () => {
- const rules: Entity[] = [
- createEntity({
- name: 'prevent-force-push-main',
- entityType: 'prevention_rule',
- observations: [
- 'NEVER force push to main branch - can destroy production history',
- ],
- }),
- createEntity({
- name: 'prevent-console-log-prod',
- entityType: 'prevention_rule',
- observations: [
- 'Do not leave console.log in production code',
- ],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'prevention_rule' && query.tag === AUTO_TAGS.SOURCE) {
- return rules;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Prevention Rules');
- expect(result).toContain('force push to main branch');
- expect(result).toContain('console.log in production');
- });
- });
-
- // ── 5. Decisions injection with git branch ────────────────────────
-
- describe('decisions injection', () => {
- it('finds decision entities relevant to current git branch', () => {
- const decisions: Entity[] = [
- createEntity({
- name: 'decision-session-memory-schema',
- entityType: 'decision',
- observations: [
- 'Use FTS5 for full-text search in session memory integration',
- 'branch: feature/session-memory-integration',
- ],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string; namePattern?: string }) => {
- if (query.entityType === 'decision' && query.namePattern) {
- return decisions;
- }
- return [];
- });
-
- const result = injector.generateContext({
- gitBranch: 'feature/session-memory-integration',
- });
-
- expect(result).toContain('Relevant Decisions');
- expect(result).toContain('FTS5 for full-text search');
- });
-
- it('also queries general decisions when no git branch provided', () => {
- const decisions: Entity[] = [
- createEntity({
- name: 'decision-use-sqlite',
- entityType: 'decision',
- observations: [
- 'Use SQLite over PostgreSQL for portability',
- ],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'decision' && query.tag === AUTO_TAGS.SOURCE) {
- return decisions;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Relevant Decisions');
- expect(result).toContain('SQLite over PostgreSQL');
- });
- });
-
- // ── 6. Recent sessions ────────────────────────────────────────────
-
- describe('recent sessions', () => {
- it('finds recent session_snapshot entities and shows titles', () => {
- const sessions: Entity[] = [
- createEntity({
- name: 'session-snapshot-abc123',
- entityType: 'session_snapshot',
- observations: [
- 'title: Implement SessionMemoryParser with TDD',
- 'status: completed',
- ],
- createdAt: new Date('2025-02-05T10:00:00Z'),
- }),
- createEntity({
- name: 'session-snapshot-def456',
- entityType: 'session_snapshot',
- observations: [
- 'title: Fix KnowledgeGraph FTS5 index migration',
- 'status: completed',
- ],
- createdAt: new Date('2025-02-04T10:00:00Z'),
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'session_snapshot' && query.tag === AUTO_TAGS.SOURCE) {
- return sessions;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Recent Sessions');
- expect(result).toContain('Implement SessionMemoryParser with TDD');
- expect(result).toContain('Fix KnowledgeGraph FTS5 index migration');
- });
- });
-
- // ── 7. Output format ──────────────────────────────────────────────
-
- describe('output format', () => {
- it('produces well-formatted text with banner and section headers', () => {
- // Provide at least one entity so output is non-empty
- const lessons: Entity[] = [
- createEntity({
- name: 'lesson-test-format',
- entityType: 'lesson_learned',
- observations: ['Test formatting observation'],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return lessons;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- // Check banner markers
- expect(result).toContain('MeMesh Enhanced Context');
- expect(result).toContain('Knowledge Graph');
-
- // Check section delimiter lines exist (double horizontal lines)
- const lines = result.split('\n');
- const delimiterLines = lines.filter((l) => l.includes('═'));
- expect(delimiterLines.length).toBeGreaterThanOrEqual(2);
- });
-
- it('does not include empty sections', () => {
- // Only lessons exist, other sections should be absent
- const lessons: Entity[] = [
- createEntity({
- name: 'lesson-only',
- entityType: 'lesson_learned',
- observations: ['Only lessons exist'],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return lessons;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Past Lessons');
- expect(result).not.toContain('Prevention Rules');
- expect(result).not.toContain('Best Practices');
- expect(result).not.toContain('Recent Sessions');
- });
- });
-
- // ── 8. Limit control ──────────────────────────────────────────────
-
- describe('limit control', () => {
- it('respects maxItemsPerSection parameter (default 5)', () => {
- // Create 10 lessons
- const manyLessons: Entity[] = Array.from({ length: 10 }, (_, i) =>
- createEntity({
- name: `lesson-${i}`,
- entityType: 'lesson_learned',
- observations: [`Lesson number ${i}`],
- }),
- );
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return manyLessons;
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- // Count bullet items in the lessons section
- // Each entity produces a "- " line item
- const lessonLines = result
- .split('\n')
- .filter((line) => line.trim().startsWith('- '));
-
- // Default maxItemsPerSection = 5
- expect(lessonLines.length).toBeLessThanOrEqual(5);
- });
-
- it('passes limit to searchEntities query', () => {
- const customInjector = new SessionContextInjector(
- mockKG as never,
- { maxItemsPerSection: 3 },
- );
-
- mockKG.searchEntities.mockReturnValue([]);
-
- customInjector.generateContext();
-
- // Verify limit passed to searchEntities
- for (const call of mockKG.searchEntities.mock.calls) {
- const query = call[0] as { limit?: number };
- expect(query.limit).toBeLessThanOrEqual(3);
- }
- });
- });
-
- // ── 9. Git branch context ─────────────────────────────────────────
-
- describe('git branch context', () => {
- it('searches for branch-relevant decisions when gitBranch is provided', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- injector.generateContext({
- gitBranch: 'feature/auth-refactor',
- });
-
- // Should have a call with namePattern containing the branch terms
- // Note: dashes are converted to spaces for better FTS5 tokenization
- const branchCall = mockKG.searchEntities.mock.calls.find(
- (call: [{ namePattern?: string; entityType?: EntityType }]) =>
- call[0].entityType === 'decision' && call[0].namePattern !== undefined,
- );
- expect(branchCall).toBeDefined();
- if (branchCall) {
- const pattern = (branchCall[0] as { namePattern: string }).namePattern;
- expect(pattern).toContain('auth');
- expect(pattern).toContain('refactor');
- }
- });
-
- it('includes branch name in search term for better FTS5 matching', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- injector.generateContext({
- gitBranch: 'feature/session-memory-integration',
- });
-
- const branchCall = mockKG.searchEntities.mock.calls.find(
- (call: [{ namePattern?: string; entityType?: EntityType }]) =>
- call[0].entityType === 'decision' && call[0].namePattern !== undefined,
- );
- expect(branchCall).toBeDefined();
- if (branchCall) {
- const pattern = (branchCall[0] as { namePattern: string }).namePattern;
- // Dashes converted to spaces for FTS5 tokenization
- expect(pattern).toContain('session');
- expect(pattern).toContain('memory');
- expect(pattern).toContain('integration');
- }
- });
- });
-
- // ── 10. Tag filtering ─────────────────────────────────────────────
-
- describe('tag filtering', () => {
- it('only queries entities tagged with source:native-session-memory', () => {
- mockKG.searchEntities.mockReturnValue([]);
-
- injector.generateContext();
-
- // Every searchEntities call (except possibly branch-specific decision searches)
- // should include tag = AUTO_TAGS.SOURCE
- const calls = mockKG.searchEntities.mock.calls;
- expect(calls.length).toBeGreaterThan(0);
-
- for (const call of calls) {
- const query = call[0] as { tag?: string; namePattern?: string };
- // Branch-specific decision queries use namePattern instead of tag,
- // but standard queries must include the tag filter
- if (!query.namePattern) {
- expect(query.tag).toBe(AUTO_TAGS.SOURCE);
- }
- }
- });
- });
-
- // ── 11. Error handling ────────────────────────────────────────────
-
- describe('error handling', () => {
- it('returns empty string gracefully when KG query throws', () => {
- mockKG.searchEntities.mockImplementation(() => {
- throw new Error('Database locked');
- });
-
- const result = injector.generateContext();
-
- expect(result).toBe('');
- });
-
- it('returns empty string when searchEntities returns undefined', () => {
- mockKG.searchEntities.mockReturnValue(undefined);
-
- const result = injector.generateContext();
-
- expect(result).toBe('');
- });
-
- it('does not throw even when entity has malformed observations', () => {
- const badEntity = createEntity({
- name: 'bad-entity',
- entityType: 'lesson_learned',
- observations: [], // Empty observations
- });
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return [badEntity];
- }
- return [];
- });
-
- // Should not throw
- const result = injector.generateContext();
- expect(typeof result).toBe('string');
- });
- });
-
- // ── 12. Token budget (maxOutputChars) ─────────────────────────────
-
- describe('token budget', () => {
- it('truncates output to maxOutputChars', () => {
- const smallInjector = new SessionContextInjector(
- mockKG as never,
- { maxOutputChars: 200, maxItemsPerSection: 50 },
- );
-
- // Create many entities with long observations
- const manyLessons: Entity[] = Array.from({ length: 20 }, (_, i) =>
- createEntity({
- name: `lesson-long-${i}`,
- entityType: 'lesson_learned',
- observations: [
- `This is a very long lesson learned observation number ${i} that contains a lot of text to test the output character limit truncation behavior of the SessionContextInjector`,
- ],
- }),
- );
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return manyLessons;
- }
- return [];
- });
-
- const result = smallInjector.generateContext();
-
- // Output should be truncated around 200 chars (with some tolerance for closing banner)
- expect(result.length).toBeLessThanOrEqual(300);
- });
-
- it('defaults maxOutputChars to 4000 when not specified', () => {
- const defaultInjector = new SessionContextInjector(mockKG as never);
-
- // Create enough entities to potentially exceed default limit
- const manyEntities: Entity[] = Array.from({ length: 50 }, (_, i) =>
- createEntity({
- name: `lesson-default-${i}`,
- entityType: 'lesson_learned',
- observations: [
- `Observation ${i}: This is a fairly long observation text used to test the default character limit behavior.`,
- ],
- }),
- );
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return manyEntities;
- }
- return [];
- });
-
- const result = defaultInjector.generateContext();
-
- // Default limit is 4000 chars, with tolerance for closing banner
- expect(result.length).toBeLessThanOrEqual(4200);
- });
- });
-
- // ── Combined output ───────────────────────────────────────────────
-
- describe('combined output with multiple sections', () => {
- it('includes all populated sections in order', () => {
- const lessons: Entity[] = [
- createEntity({
- name: 'lesson-1',
- entityType: 'lesson_learned',
- observations: ['Error: Missed import\nCorrection: Always check imports'],
- }),
- ];
-
- const practices: Entity[] = [
- createEntity({
- name: 'practice-1',
- entityType: 'best_practice',
- observations: ['Always run tests before committing'],
- }),
- ];
-
- const rules: Entity[] = [
- createEntity({
- name: 'rule-1',
- entityType: 'prevention_rule',
- observations: ['Never commit secrets to version control'],
- }),
- ];
-
- const sessions: Entity[] = [
- createEntity({
- name: 'session-1',
- entityType: 'session_snapshot',
- observations: ['title: Setup CI/CD pipeline'],
- }),
- ];
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.tag !== AUTO_TAGS.SOURCE) return [];
- switch (query.entityType) {
- case 'lesson_learned':
- return lessons;
- case 'best_practice':
- return practices;
- case 'prevention_rule':
- return rules;
- case 'session_snapshot':
- return sessions;
- case 'decision':
- return [];
- default:
- return [];
- }
- });
-
- const result = injector.generateContext();
-
- // All sections present
- expect(result).toContain('Past Lessons');
- expect(result).toContain('Best Practices');
- expect(result).toContain('Prevention Rules');
- expect(result).toContain('Recent Sessions');
-
- // Verify ordering: Lessons before Practices before Sessions
- const lessonsIdx = result.indexOf('Past Lessons');
- const rulesIdx = result.indexOf('Prevention Rules');
- const practicesIdx = result.indexOf('Best Practices');
- const sessionsIdx = result.indexOf('Recent Sessions');
-
- expect(lessonsIdx).toBeLessThan(rulesIdx);
- expect(rulesIdx).toBeLessThan(practicesIdx);
- expect(practicesIdx).toBeLessThan(sessionsIdx);
- });
- });
-
- // ── Entity observation extraction ─────────────────────────────────
-
- describe('entity observation extraction', () => {
- it('extracts first meaningful observation from entity', () => {
- const entity = createEntity({
- name: 'lesson-extract-test',
- entityType: 'lesson_learned',
- observations: [
- 'source_session: abc123', // metadata-like - should be skipped
- 'Error: Forgot to handle null case in user lookup',
- ],
- });
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return [entity];
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Forgot to handle null case');
- });
-
- it('truncates individual items to ~200 chars', () => {
- const longObservation = 'A'.repeat(500);
- const entity = createEntity({
- name: 'lesson-long-obs',
- entityType: 'lesson_learned',
- observations: [longObservation],
- });
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'lesson_learned' && query.tag === AUTO_TAGS.SOURCE) {
- return [entity];
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- // Should not contain the full 500 char string
- expect(result).not.toContain(longObservation);
- // But should contain a truncated version with ellipsis
- expect(result).toContain('...');
- });
-
- it('extracts title from session_snapshot observations', () => {
- const session = createEntity({
- name: 'session-title-test',
- entityType: 'session_snapshot',
- observations: [
- 'title: Build Authentication Module',
- 'status: in-progress',
- 'files: src/auth/index.ts',
- ],
- });
-
- mockKG.searchEntities.mockImplementation((query: { entityType?: EntityType; tag?: string }) => {
- if (query.entityType === 'session_snapshot' && query.tag === AUTO_TAGS.SOURCE) {
- return [session];
- }
- return [];
- });
-
- const result = injector.generateContext();
-
- expect(result).toContain('Build Authentication Module');
- });
- });
-});
diff --git a/tests/integrations/session-memory/SessionMemoryIngester.test.ts b/tests/integrations/session-memory/SessionMemoryIngester.test.ts
deleted file mode 100644
index eb612c16..00000000
--- a/tests/integrations/session-memory/SessionMemoryIngester.test.ts
+++ /dev/null
@@ -1,766 +0,0 @@
-import { createHash } from 'crypto';
-import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
-import { SessionMemoryIngester } from '../../../src/integrations/session-memory/SessionMemoryIngester.js';
-import type { KnowledgeGraph } from '../../../src/knowledge-graph/index.js';
-import type { Entity, Relation } from '../../../src/knowledge-graph/types.js';
-import type {
- ParsedSessionMemory,
- SessionMemoryEvent,
- IngestionResult,
-} from '../../../src/integrations/session-memory/types.js';
-import { AUTO_TAGS } from '../../../src/integrations/session-memory/types.js';
-
-/**
- * Helper to compute content hash the same way the ingester does.
- */
-function computeHash(content: string): string {
- return createHash('sha256').update(content).digest('hex');
-}
-
-// ─── Test Helpers ────────────────────────────────────────────────────
-
-/**
- * Create a mock KnowledgeGraph with all methods stubbed.
- * Uses vi.fn() so tests can inspect calls and override behavior.
- */
-function createMockKnowledgeGraph(): {
- createEntity: Mock;
- createRelation: Mock;
- getEntity: Mock;
- searchEntities: Mock;
- transaction: Mock;
-} {
- const mock = {
- createEntity: vi.fn((entity: Entity) => entity.name),
- createRelation: vi.fn(),
- getEntity: vi.fn(() => null),
- searchEntities: vi.fn(() => []),
- transaction: vi.fn((fn: () => unknown) => fn()),
- };
- return mock;
-}
-
-/**
- * Create a minimal ParsedSessionMemory for testing.
- * Override specific fields as needed.
- */
-function createParsedMemory(
- overrides: Partial = {},
-): ParsedSessionMemory {
- return {
- title: 'Test Session Title',
- currentState: null,
- taskSpec: null,
- filesAndFunctions: [],
- workflow: [],
- errorsAndCorrections: [],
- codebaseDoc: null,
- learnings: [],
- worklog: [],
- rawSections: new Map(),
- ...overrides,
- };
-}
-
-/**
- * Create a minimal SessionMemoryEvent for testing.
- */
-function createEvent(
- overrides: Partial = {},
-): SessionMemoryEvent {
- return {
- sessionId: 'abc12345-6789-0def-ghij-klmnopqrstuv',
- projectPath: '/Users/dev/my-project',
- sanitizedPath: '-Users-dev-my-project',
- summaryPath: '/home/.claude/projects/-Users-dev-my-project/abc12345/session-memory/summary.md',
- content: '# Test content',
- timestamp: new Date('2025-01-15T10:00:00Z'),
- changeType: 'created',
- ...overrides,
- };
-}
-
-// ─── Tests ───────────────────────────────────────────────────────────
-
-describe('SessionMemoryIngester', () => {
- let mockKG: ReturnType;
- let ingester: SessionMemoryIngester;
-
- beforeEach(() => {
- mockKG = createMockKnowledgeGraph();
- ingester = new SessionMemoryIngester(
- mockKG as unknown as KnowledgeGraph,
- );
- });
-
- // ─── 1. Basic ingestion ──────────────────────────────────────────
-
- describe('basic ingestion', () => {
- it('should ingest a fully parsed session memory and return IngestionResult', async () => {
- const parsed = createParsedMemory({
- title: 'Implement auth module',
- errorsAndCorrections: [
- { error: 'Missing JWT secret', correction: 'Added env var JWT_SECRET' },
- ],
- learnings: [
- { content: 'Always validate token expiry', type: 'positive' },
- ],
- filesAndFunctions: [
- { path: 'src/auth/jwt.ts', description: 'JWT token handling' },
- ],
- workflow: [
- { command: 'npm test', description: 'Run test suite' },
- ],
- });
- const event = createEvent();
-
- const result = await ingester.ingest(parsed, event);
-
- expect(result.sessionId).toBe(event.sessionId);
- expect(result.entitiesCreated).toBeGreaterThan(0);
- expect(result.errors).toHaveLength(0);
- // Session + 1 error + 1 learning + 1 file + 1 workflow = 5 entities
- expect(mockKG.createEntity).toHaveBeenCalledTimes(5);
- });
- });
-
- // ─── 2. Session entity creation ──────────────────────────────────
-
- describe('session entity creation', () => {
- it('should create a session_snapshot entity with title, project, and timestamp', async () => {
- const parsed = createParsedMemory({ title: 'Deploy to production' });
- const event = createEvent({
- sessionId: 'deadbeef-1234-5678-9abc-def012345678',
- projectPath: '/Users/dev/my-app',
- timestamp: new Date('2025-06-01T14:30:00Z'),
- });
-
- await ingester.ingest(parsed, event);
-
- // First createEntity call should be the session entity
- const sessionCall = mockKG.createEntity.mock.calls[0][0] as Entity;
- expect(sessionCall.name).toBe('session:deadbeef');
- expect(sessionCall.entityType).toBe('session_snapshot');
- expect(sessionCall.observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('Deploy to production'),
- ]),
- );
- expect(sessionCall.metadata).toEqual(
- expect.objectContaining({
- sessionId: 'deadbeef-1234-5678-9abc-def012345678',
- projectPath: '/Users/dev/my-app',
- sourceType: 'claude-native-session-memory',
- }),
- );
- });
- });
-
- // ─── 3. ErrorCorrection -> lesson_learned mapping ────────────────
-
- describe('error to lesson_learned mapping', () => {
- it('should create a lesson_learned entity for each ErrorCorrection', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- {
- error: 'TypeScript strict null checks failed',
- correction: 'Added null guards to all optional params',
- failedApproach: 'Tried using non-null assertion',
- },
- {
- error: 'Build timeout on CI',
- correction: 'Increased timeout to 60s',
- },
- ],
- });
- const event = createEvent();
-
- await ingester.ingest(parsed, event);
-
- // Session entity + 2 lesson entities = 3 calls
- const lessonCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'lesson_learned');
-
- expect(lessonCalls).toHaveLength(2);
-
- // First lesson should have error, correction, AND failedApproach
- expect(lessonCalls[0].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('TypeScript strict null checks failed'),
- expect.stringContaining('Added null guards'),
- expect.stringContaining('non-null assertion'),
- ]),
- );
-
- // Second lesson should have error and correction only
- expect(lessonCalls[1].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('Build timeout on CI'),
- expect.stringContaining('Increased timeout'),
- ]),
- );
-
- // Name should be slugified
- expect(lessonCalls[0].name).toMatch(/^lesson:/);
- });
- });
-
- // ─── 4. Learning classification mapping ──────────────────────────
-
- describe('learning classification mapping', () => {
- it('should map negative learning to prevention_rule entity', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Never use dynamic code execution in production', type: 'negative' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const learningCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'prevention_rule');
-
- expect(learningCalls).toHaveLength(1);
- expect(learningCalls[0].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('dynamic code execution'),
- ]),
- );
- });
-
- it('should map positive learning to best_practice entity', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Use dependency injection for testability', type: 'positive' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const learningCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'best_practice');
-
- expect(learningCalls).toHaveLength(1);
- expect(learningCalls[0].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('dependency injection'),
- ]),
- );
- });
-
- it('should map neutral learning to best_practice entity', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Project uses ESM modules with .js extension', type: 'neutral' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const learningCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'best_practice');
-
- expect(learningCalls).toHaveLength(1);
- });
- });
-
- // ─── 5. File references -> feature entities ──────────────────────
-
- describe('file references to feature entities', () => {
- it('should create a feature entity for each FileReference', async () => {
- const parsed = createParsedMemory({
- filesAndFunctions: [
- { path: 'src/auth/jwt.ts', description: 'JWT token verification' },
- { path: 'src/db/migrations/001.sql', description: 'Initial schema' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const fileCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'feature');
-
- expect(fileCalls).toHaveLength(2);
-
- // Name should use colons for path separators
- expect(fileCalls[0].name).toBe('file:src:auth:jwt.ts');
- expect(fileCalls[1].name).toBe('file:src:db:migrations:001.sql');
-
- // Observations include path and description
- expect(fileCalls[0].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('src/auth/jwt.ts'),
- expect.stringContaining('JWT token verification'),
- ]),
- );
- });
- });
-
- // ─── 6. Workflow -> decision entity ──────────────────────────────
-
- describe('workflow to decision entity', () => {
- it('should aggregate workflow steps into a single decision entity', async () => {
- const parsed = createParsedMemory({
- workflow: [
- { command: 'npm install', description: 'Install deps' },
- { command: 'npm run build', description: 'Build project' },
- { command: 'npm test', description: 'Run tests' },
- ],
- });
- const event = createEvent({ sessionId: 'abcdef12-3456-7890-abcd-ef1234567890' });
-
- await ingester.ingest(parsed, event);
-
- const workflowCalls = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .filter((e) => e.entityType === 'decision');
-
- expect(workflowCalls).toHaveLength(1);
- expect(workflowCalls[0].name).toBe('workflow:session:abcdef12');
-
- // All workflow steps should be in observations
- expect(workflowCalls[0].observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('npm install'),
- expect.stringContaining('npm run build'),
- expect.stringContaining('npm test'),
- ]),
- );
- });
- });
-
- // ─── 7. Relation creation ───────────────────────────────────────
-
- describe('relation creation', () => {
- it('should create relations from session entity to child entities', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: 'Import error', correction: 'Fixed path' },
- ],
- learnings: [
- { content: 'Check imports carefully', type: 'positive' },
- ],
- filesAndFunctions: [
- { path: 'src/index.ts', description: 'Entry point' },
- ],
- });
- const event = createEvent();
-
- await ingester.ingest(parsed, event);
-
- // 3 child entities = 3 relations
- expect(mockKG.createRelation).toHaveBeenCalledTimes(3);
-
- // All relations should be FROM the session entity
- const relationCalls = mockKG.createRelation.mock.calls.map(
- (c) => c[0] as Relation,
- );
-
- const sessionName = `session:${event.sessionId.substring(0, 8)}`;
- for (const rel of relationCalls) {
- expect(rel.from).toBe(sessionName);
- }
-
- // Verify relation types
- const relTypes = relationCalls.map((r) => r.relationType);
- expect(relTypes).toContain('caused_by'); // lesson
- expect(relTypes).toContain('follows_pattern'); // learning
- expect(relTypes).toContain('depends_on'); // file
- });
-
- it('should create relation for workflow entity', async () => {
- const parsed = createParsedMemory({
- workflow: [
- { command: 'npm test', description: 'Run tests' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const relationCalls = mockKG.createRelation.mock.calls.map(
- (c) => c[0] as Relation,
- );
- expect(relationCalls).toHaveLength(1);
- expect(relationCalls[0].relationType).toBe('enabled_by');
- });
- });
-
- // ─── 8. Deduplication - content hash ─────────────────────────────
-
- describe('deduplication via content hash', () => {
- it('should skip entity creation when content hash matches existing', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Use strict mode', type: 'positive' },
- ],
- });
- const event = createEvent();
-
- // First ingestion: createEntity returns name normally
- await ingester.ingest(parsed, event);
-
- // Second ingestion: simulate KG returning existing entity name
- // (content_hash deduplication happens inside KG.createEntity)
- // The ingester should detect that the returned name matches
- // and count it as "skipped" rather than "created"
- mockKG.createEntity.mockClear();
- mockKG.createRelation.mockClear();
-
- // Simulate getEntity returning existing entity for dedup check.
- // Use the same hash computation as the ingester: contentHash('learning:Use strict mode')
- const expectedHash = computeHash('learning:Use strict mode');
- mockKG.getEntity.mockImplementation((name: string) => {
- if (name.startsWith('learning:')) {
- return {
- name,
- entityType: 'best_practice',
- observations: ['Use strict mode'],
- tags: [AUTO_TAGS.SOURCE, AUTO_TAGS.AUTO_INGESTED],
- contentHash: expectedHash,
- };
- }
- return null;
- });
-
- const result = await ingester.ingest(parsed, event);
-
- // Learning entity should be skipped (session entity still created)
- expect(result.entitiesSkipped).toBeGreaterThanOrEqual(1);
- });
- });
-
- // ─── 9. Deduplication - same name entity (append observations) ──
-
- describe('deduplication via same name entity', () => {
- it('should update existing entity with new observations when name matches', async () => {
- const parsed = createParsedMemory({
- filesAndFunctions: [
- { path: 'src/index.ts', description: 'Updated entry point with new exports' },
- ],
- });
- const event = createEvent();
-
- // Simulate getEntity finding existing entity with same name
- mockKG.getEntity.mockImplementation((name: string) => {
- if (name === 'file:src:index.ts') {
- return {
- name: 'file:src:index.ts',
- entityType: 'feature',
- observations: ['src/index.ts', 'Original entry point'],
- tags: [AUTO_TAGS.SOURCE, AUTO_TAGS.AUTO_INGESTED],
- };
- }
- return null;
- });
-
- const result = await ingester.ingest(parsed, event);
-
- // The file entity should be counted as updated (not created)
- expect(result.entitiesUpdated).toBeGreaterThanOrEqual(1);
-
- // createEntity should still be called (upsert behavior)
- const fileCall = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .find((e) => e.name === 'file:src:index.ts');
-
- expect(fileCall).toBeDefined();
- // New observations should include BOTH old and new
- expect(fileCall!.observations).toEqual(
- expect.arrayContaining([
- expect.stringContaining('Updated entry point'),
- ]),
- );
- });
- });
-
- // ─── 10. Auto-tagging ───────────────────────────────────────────
-
- describe('auto-tagging', () => {
- it('should add source:native-session-memory and auto-ingested tags to all entities', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Tag test learning', type: 'neutral' },
- ],
- filesAndFunctions: [
- { path: 'src/app.ts', description: 'App entry' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const allEntities = mockKG.createEntity.mock.calls.map(
- (c) => c[0] as Entity,
- );
-
- for (const entity of allEntities) {
- expect(entity.tags).toEqual(
- expect.arrayContaining([
- AUTO_TAGS.SOURCE,
- AUTO_TAGS.AUTO_INGESTED,
- ]),
- );
- }
- });
- });
-
- // ─── 11. Metadata ──────────────────────────────────────────────
-
- describe('metadata', () => {
- it('should include sessionId and sourceType in metadata for all entities', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: 'Test error', correction: 'Test fix' },
- ],
- });
- const event = createEvent({
- sessionId: 'meta-test-session-id-1234567890abcdef',
- });
-
- await ingester.ingest(parsed, event);
-
- const allEntities = mockKG.createEntity.mock.calls.map(
- (c) => c[0] as Entity,
- );
-
- for (const entity of allEntities) {
- expect(entity.metadata).toEqual(
- expect.objectContaining({
- sessionId: 'meta-test-session-id-1234567890abcdef',
- sourceType: 'claude-native-session-memory',
- }),
- );
- }
- });
- });
-
- // ─── 12. Empty parsed memory ───────────────────────────────────
-
- describe('empty parsed memory', () => {
- it('should create only the session entity when all sections are empty', async () => {
- const parsed = createParsedMemory({
- title: 'Empty Session',
- currentState: null,
- taskSpec: null,
- filesAndFunctions: [],
- workflow: [],
- errorsAndCorrections: [],
- learnings: [],
- worklog: [],
- });
-
- const result = await ingester.ingest(parsed, createEvent());
-
- // Only the session entity should be created
- expect(mockKG.createEntity).toHaveBeenCalledTimes(1);
-
- const sessionEntity = mockKG.createEntity.mock.calls[0][0] as Entity;
- expect(sessionEntity.entityType).toBe('session_snapshot');
- expect(sessionEntity.name).toMatch(/^session:/);
-
- // No relations should be created (no child entities)
- expect(mockKG.createRelation).not.toHaveBeenCalled();
-
- // Result should reflect only 1 entity created
- expect(result.entitiesCreated).toBe(1);
- expect(result.relationsCreated).toBe(0);
- });
- });
-
- // ─── 13. Error handling ────────────────────────────────────────
-
- describe('error handling', () => {
- it('should capture errors in result.errors and continue ingestion', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: 'First error', correction: 'First fix' },
- { error: 'Second error', correction: 'Second fix' },
- ],
- learnings: [
- { content: 'A good practice', type: 'positive' },
- ],
- });
-
- // Make the first lesson creation fail, but others succeed
- let callCount = 0;
- mockKG.createEntity.mockImplementation((entity: Entity) => {
- callCount++;
- // Fail on the second call (first lesson entity)
- if (callCount === 2) {
- throw new Error('Database write failed');
- }
- return entity.name;
- });
-
- const result = await ingester.ingest(parsed, createEvent());
-
- // Should have at least one error
- expect(result.errors.length).toBeGreaterThanOrEqual(1);
- expect(result.errors[0].message).toContain('Database write failed');
-
- // Should still create the third and fourth entities
- // (session=1, fail=2, second lesson=3, learning=4)
- expect(mockKG.createEntity).toHaveBeenCalledTimes(4);
- });
-
- it('should not fail entirely when createRelation throws', async () => {
- const parsed = createParsedMemory({
- learnings: [
- { content: 'Relation test', type: 'positive' },
- ],
- });
-
- mockKG.createRelation.mockImplementation(() => {
- throw new Error('Relation creation failed');
- });
-
- const result = await ingester.ingest(parsed, createEvent());
-
- // Should have error for the failed relation
- expect(result.errors.length).toBeGreaterThanOrEqual(1);
- expect(result.errors.some((e) => e.message.includes('Relation creation failed'))).toBe(true);
-
- // Entities should still be created
- expect(result.entitiesCreated).toBeGreaterThanOrEqual(1);
- });
- });
-
- // ─── 14. IngestionResult accuracy ──────────────────────────────
-
- describe('IngestionResult accuracy', () => {
- it('should accurately count created, updated, skipped, and relations', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: 'Bug A', correction: 'Fix A' },
- ],
- learnings: [
- { content: 'Practice B', type: 'positive' },
- ],
- filesAndFunctions: [
- { path: 'src/foo.ts', description: 'Foo module' },
- ],
- workflow: [
- { command: 'npm test', description: 'Test' },
- ],
- });
- const event = createEvent();
-
- const result = await ingester.ingest(parsed, event);
-
- // 5 entities: session + lesson + learning + file + workflow
- expect(result.entitiesCreated).toBe(5);
- expect(result.entitiesUpdated).toBe(0);
- expect(result.entitiesSkipped).toBe(0);
- // 4 relations: session->lesson, session->learning, session->file, session->workflow
- expect(result.relationsCreated).toBe(4);
- expect(result.errors).toHaveLength(0);
- expect(result.sessionId).toBe(event.sessionId);
- });
-
- it('should count errors separately from successful operations', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: 'Bug 1', correction: 'Fix 1' },
- { error: 'Bug 2', correction: 'Fix 2' },
- ],
- });
-
- // Fail on the second lesson entity (third call overall)
- let callIdx = 0;
- mockKG.createEntity.mockImplementation((entity: Entity) => {
- callIdx++;
- if (callIdx === 3) {
- throw new Error('DB error');
- }
- return entity.name;
- });
-
- const result = await ingester.ingest(parsed, createEvent());
-
- // session(1) + lesson1(2) + lesson2_fail(3) = 2 created, 1 error
- expect(result.entitiesCreated).toBe(2);
- expect(result.errors).toHaveLength(1);
- });
- });
-
- // ─── Slugify edge cases ────────────────────────────────────────
-
- describe('entity naming and slugification', () => {
- it('should slugify entity names: lowercase, hyphens for spaces, alphanumeric only', async () => {
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- {
- error: 'TypeScript Strict Mode: Cannot assign to readonly!',
- correction: 'Used spread operator',
- },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const lessonEntity = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .find((e) => e.entityType === 'lesson_learned');
-
- expect(lessonEntity).toBeDefined();
- // Should be lowercase, spaces->hyphens, special chars removed
- expect(lessonEntity!.name).toMatch(/^lesson:[a-z0-9-]+$/);
- // Should not exceed 60+prefix chars
- expect(lessonEntity!.name.length).toBeLessThanOrEqual(60 + 'lesson:'.length);
- });
-
- it('should truncate slugified names to 60 characters', async () => {
- const longError = 'A'.repeat(100) + ' very long error message that exceeds the limit';
- const parsed = createParsedMemory({
- errorsAndCorrections: [
- { error: longError, correction: 'Fix' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const lessonEntity = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .find((e) => e.entityType === 'lesson_learned');
-
- expect(lessonEntity).toBeDefined();
- // "lesson:" prefix + max 60 chars slug
- const slugPart = lessonEntity!.name.replace(/^lesson:/, '');
- expect(slugPart.length).toBeLessThanOrEqual(60);
- });
-
- it('should use session ID first 8 chars for session entity name', async () => {
- const event = createEvent({
- sessionId: '12345678-abcd-efgh-ijkl-mnopqrstuvwx',
- });
-
- await ingester.ingest(createParsedMemory(), event);
-
- const sessionEntity = mockKG.createEntity.mock.calls[0][0] as Entity;
- expect(sessionEntity.name).toBe('session:12345678');
- });
-
- it('should replace path separators with colons for file entities', async () => {
- const parsed = createParsedMemory({
- filesAndFunctions: [
- { path: '/absolute/path/to/file.ts', description: 'Absolute path' },
- ],
- });
-
- await ingester.ingest(parsed, createEvent());
-
- const fileEntity = mockKG.createEntity.mock.calls
- .map((c) => c[0] as Entity)
- .find((e) => e.entityType === 'feature');
-
- expect(fileEntity).toBeDefined();
- // Leading slash should be handled (trimmed or converted)
- expect(fileEntity!.name).toMatch(/^file:/);
- expect(fileEntity!.name).not.toContain('/');
- });
- });
-});
diff --git a/tests/integrations/session-memory/SessionMemoryParser.test.ts b/tests/integrations/session-memory/SessionMemoryParser.test.ts
deleted file mode 100644
index 6596f62a..00000000
--- a/tests/integrations/session-memory/SessionMemoryParser.test.ts
+++ /dev/null
@@ -1,658 +0,0 @@
-/**
- * SessionMemoryParser Test Suite
- *
- * TDD tests for parsing Claude Code's native session memory (summary.md)
- * into structured ParsedSessionMemory objects.
- */
-
-import { describe, it, expect } from 'vitest';
-import { SessionMemoryParser } from '../../../src/integrations/session-memory/SessionMemoryParser.js';
-import type {
- ParsedSessionMemory,
- FileReference,
- WorkflowStep,
- ErrorCorrection,
- Learning,
- WorklogEntry,
-} from '../../../src/integrations/session-memory/types.js';
-
-// ─── Test Fixtures ──────────────────────────────────────────────────
-
-const COMPLETE_SUMMARY_MD = `# Session Title
-
-_A short description of this session_
-
-Implement SessionMemoryParser for CCB TDD
-
-# Current State
-
-_What is the current state of the work?_
-
-Working on the parser implementation. Tests are written and passing.
-Need to integrate with the ingestion pipeline next.
-
-# Task specification
-
-_What did the user ask you to do?_
-
-Build a SessionMemoryParser that reads summary.md files and converts them
-into structured ParsedSessionMemory objects for the KnowledgeGraph.
-
-# Files and Functions
-
-_Important files and their purposes_
-
-- \`src/integrations/session-memory/SessionMemoryParser.ts\` - Main parser class that converts markdown to structured data
-- \`src/integrations/session-memory/types.ts\` - TypeScript type definitions for parsed session memory
-- \`tests/integrations/session-memory/SessionMemoryParser.test.ts\` - Test suite for the parser
-
-# Workflow
-
-_Bash commands you typically run_
-
-\`\`\`bash
-npx vitest run tests/integrations/session-memory/SessionMemoryParser.test.ts
-\`\`\`
-
-\`\`\`bash
-npx tsc --noEmit
-\`\`\`
-
-- \`npm run lint\` - Run linter to check code style
-
-# Errors & Corrections
-
-_Errors encountered and how they were fixed_
-
-- Error: TypeScript strict mode rejected implicit any in regex match groups
- - Correction: Added explicit type annotations for match results
- - Failed approach: Tried using non-null assertion but it masked real bugs
-
-- Error: Section splitting broke when content contained markdown headers inside code blocks
- - Correction: Pre-process to remove code block content before splitting, then restore
-
-# Codebase and System Documentation
-
-_Key system components and architecture_
-
-CCB uses a SQLite-based KnowledgeGraph for storing entities and relations.
-Session memory files are written by Claude Code at ~/.claude/projects/{path}/{session}/session-memory/summary.md.
-The parser converts these markdown files into structured data for ingestion.
-
-# Learnings
-
-_What worked, what didn't_
-
-- Avoid using complex regex for markdown parsing - simple line-by-line processing works better
-- The keyword-based learning classification works well for categorizing session insights
-- TypeScript strict mode catches many bugs early but requires more explicit typing
-- The system uses section headers as delimiters for parsing
-
-# Worklog
-
-_Chronological record of what was done_
-
-- [Step 1] Set up project structure and types
-- [Step 2] Wrote comprehensive test suite
-- [Step 3] Implemented parser with TDD approach
-- Reviewed and refactored error handling
-`;
-
-const TITLE_ONLY_MD = `# Session Title
-
-Quick Bug Fix Session
-`;
-
-const EMPTY_SECTIONS_MD = `# Session Title
-
-_A short description_
-
-Database Migration Debugging
-
-# Current State
-
-_What is the current state?_
-
-# Learnings
-
-_What worked, what didn't_
-
-# Worklog
-
-_Chronological record_
-`;
-
-const MALFORMED_MD = `# Session Title
-
-Some title here
-
-# Current State
-
-Content without italic description
-
-# Unknown Custom Section
-
-This section is not in the standard list.
-It should be preserved in rawSections.
-
-# Another Unknown
-
-More custom content here.
-`;
-
-// ─── Tests ──────────────────────────────────────────────────────────
-
-describe('SessionMemoryParser', () => {
- const parser = new SessionMemoryParser();
-
- describe('Section extraction', () => {
- it('should parse a complete summary.md with all standard sections', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
-
- expect(result.title).toBe('Implement SessionMemoryParser for CCB TDD');
- expect(result.currentState).toContain('Working on the parser implementation');
- expect(result.taskSpec).toContain('Build a SessionMemoryParser');
- expect(result.filesAndFunctions.length).toBeGreaterThan(0);
- expect(result.workflow.length).toBeGreaterThan(0);
- expect(result.errorsAndCorrections.length).toBeGreaterThan(0);
- expect(result.codebaseDoc).toContain('SQLite-based KnowledgeGraph');
- expect(result.learnings.length).toBeGreaterThan(0);
- expect(result.worklog.length).toBeGreaterThan(0);
- });
-
- it('should return all expected fields in the parsed result', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
-
- expect(result).toHaveProperty('title');
- expect(result).toHaveProperty('currentState');
- expect(result).toHaveProperty('taskSpec');
- expect(result).toHaveProperty('filesAndFunctions');
- expect(result).toHaveProperty('workflow');
- expect(result).toHaveProperty('errorsAndCorrections');
- expect(result).toHaveProperty('codebaseDoc');
- expect(result).toHaveProperty('learnings');
- expect(result).toHaveProperty('worklog');
- expect(result).toHaveProperty('rawSections');
- });
- });
-
- describe('Title parsing', () => {
- it('should extract the title from the Session Title section', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.title).toBe('Implement SessionMemoryParser for CCB TDD');
- });
-
- it('should extract title when it is the only section', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.title).toBe('Quick Bug Fix Session');
- });
-
- it('should return empty string for title when input is empty', () => {
- const result = parser.parse('');
- expect(result.title).toBe('');
- });
-
- it('should trim whitespace from the title', () => {
- const md = `# Session Title
-
- Whitespace Padded Title
-`;
- const result = parser.parse(md);
- expect(result.title).toBe('Whitespace Padded Title');
- });
- });
-
- describe('Italic description filtering', () => {
- it('should filter out italic description lines from all sections', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
-
- // The italic descriptions should not appear in parsed content
- expect(result.currentState).not.toContain('_What is the current state');
- expect(result.taskSpec).not.toContain('_What did the user ask');
- expect(result.codebaseDoc).not.toContain('_Key system components');
- });
-
- it('should return null for sections that only contain italic descriptions', () => {
- const result = parser.parse(EMPTY_SECTIONS_MD);
-
- // Current State section has only the italic description, no real content
- expect(result.currentState).toBeNull();
- });
-
- it('should filter italic lines from the title section as well', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.title).not.toContain('_A short description');
- });
-
- it('should not filter lines with underscores that are not italic markers', () => {
- const md = `# Session Title
-
-Test Title
-
-# Current State
-
-The variable_name uses snake_case formatting.
-This is _italic_ but has other content too.
-`;
- const result = parser.parse(md);
-
- // Lines with underscores in variable names or inline italic should be kept
- expect(result.currentState).toContain('variable_name');
- });
- });
-
- describe('Learning classification', () => {
- it('should classify "Avoid doing X" as negative', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- Avoid using complex regex for markdown parsing
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('negative');
- expect(result.learnings[0].content).toContain('Avoid using complex regex');
- });
-
- it('should classify "X works well" as positive', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- The keyword-based classification works well for categorizing insights
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('positive');
- });
-
- it('should classify "The system uses Y" as neutral', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- The system uses section headers as delimiters for parsing
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('neutral');
- });
-
- it('should classify "don\'t do X" as negative', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- Don't use mutable global state for configuration
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('negative');
- });
-
- it('should classify "X failed" as negative', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- The initial approach with dynamic code execution failed due to security restrictions
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('negative');
- });
-
- it('should classify "effective" as positive', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- Using structured logging was effective for debugging production issues
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('positive');
- });
-
- it('should classify "recommended" as positive', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- It is recommended to use dependency injection for testability
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('positive');
- });
-
- it('should classify ambiguous content as neutral', () => {
- const md = `# Session Title
-
-Test
-
-# Learnings
-
-- TypeScript strict mode requires more explicit typing in function signatures
-`;
- const result = parser.parse(md);
- expect(result.learnings).toHaveLength(1);
- expect(result.learnings[0].type).toBe('neutral');
- });
-
- it('should parse multiple learnings from the complete fixture', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.learnings.length).toBe(4);
-
- // "Avoid using complex regex..." -> negative
- expect(result.learnings[0].type).toBe('negative');
-
- // "...classification works well..." -> positive
- expect(result.learnings[1].type).toBe('positive');
-
- // "TypeScript strict mode catches many bugs..." -> neutral
- expect(result.learnings[2].type).toBe('neutral');
-
- // "The system uses..." -> neutral
- expect(result.learnings[3].type).toBe('neutral');
- });
- });
-
- describe('Error parsing', () => {
- it('should extract error/correction pairs from bullet points', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.errorsAndCorrections).toHaveLength(2);
- });
-
- it('should parse the error description from the main bullet', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.errorsAndCorrections[0].error).toContain(
- 'TypeScript strict mode rejected implicit any'
- );
- });
-
- it('should parse the correction from sub-bullet', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.errorsAndCorrections[0].correction).toContain(
- 'Added explicit type annotations'
- );
- });
-
- it('should parse the failed approach when present', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.errorsAndCorrections[0].failedApproach).toContain(
- 'non-null assertion'
- );
- });
-
- it('should handle error entries without a failed approach', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- // Second error entry has no "Failed approach" sub-bullet
- expect(result.errorsAndCorrections[1].error).toContain('Section splitting broke');
- expect(result.errorsAndCorrections[1].correction).toContain('Pre-process to remove code block');
- });
-
- it('should return empty array when no errors section exists', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.errorsAndCorrections).toEqual([]);
- });
- });
-
- describe('File reference parsing', () => {
- it('should extract file paths and descriptions', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.filesAndFunctions).toHaveLength(3);
- });
-
- it('should parse the file path from backtick-wrapped text', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const firstFile = result.filesAndFunctions[0];
- expect(firstFile.path).toBe('src/integrations/session-memory/SessionMemoryParser.ts');
- });
-
- it('should parse the description after the path', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const firstFile = result.filesAndFunctions[0];
- expect(firstFile.description).toContain('Main parser class');
- });
-
- it('should return empty array when no files section exists', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.filesAndFunctions).toEqual([]);
- });
-
- it('should handle file references without backticks', () => {
- const md = `# Session Title
-
-Test
-
-# Files and Functions
-
-- src/main.ts - Entry point for the application
-`;
- const result = parser.parse(md);
- expect(result.filesAndFunctions).toHaveLength(1);
- expect(result.filesAndFunctions[0].path).toBe('src/main.ts');
- expect(result.filesAndFunctions[0].description).toContain('Entry point');
- });
- });
-
- describe('Workflow parsing', () => {
- it('should extract commands from code blocks', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const codeBlockCommands = result.workflow.filter(
- (w) => w.command.includes('vitest') || w.command.includes('tsc')
- );
- expect(codeBlockCommands.length).toBeGreaterThanOrEqual(2);
- });
-
- it('should extract commands from bullet points', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const bulletCommand = result.workflow.find(
- (w) => w.command.includes('npm run lint')
- );
- expect(bulletCommand).toBeDefined();
- expect(bulletCommand!.description).toContain('Run linter');
- });
-
- it('should parse code block commands with their language tag stripped', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const vitestStep = result.workflow.find(
- (w) => w.command.includes('npx vitest')
- );
- expect(vitestStep).toBeDefined();
- // Command should not include the ```bash marker
- expect(vitestStep!.command).not.toContain('```');
- });
-
- it('should return empty array when no workflow section exists', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.workflow).toEqual([]);
- });
- });
-
- describe('Worklog parsing', () => {
- it('should parse chronological entries from bullet points', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.worklog).toHaveLength(4);
- });
-
- it('should extract marker/step number from bracketed prefix', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.worklog[0].marker).toBe('Step 1');
- expect(result.worklog[0].activity).toContain('Set up project structure');
- });
-
- it('should handle entries without markers', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- const lastEntry = result.worklog[3];
- expect(lastEntry.marker).toBeUndefined();
- expect(lastEntry.activity).toContain('Reviewed and refactored');
- });
-
- it('should return empty array when no worklog section exists', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.worklog).toEqual([]);
- });
-
- it('should return empty array when worklog section has only italic description', () => {
- const result = parser.parse(EMPTY_SECTIONS_MD);
- expect(result.worklog).toEqual([]);
- });
- });
-
- describe('Edge cases', () => {
- it('should throw on excessively large input (DoS protection)', () => {
- const hugeInput = 'x'.repeat(10 * 1024 * 1024 + 1);
- expect(() => parser.parse(hugeInput)).toThrow('Input too large');
- });
-
- it('should handle empty string input', () => {
- const result = parser.parse('');
-
- expect(result.title).toBe('');
- expect(result.currentState).toBeNull();
- expect(result.taskSpec).toBeNull();
- expect(result.filesAndFunctions).toEqual([]);
- expect(result.workflow).toEqual([]);
- expect(result.errorsAndCorrections).toEqual([]);
- expect(result.codebaseDoc).toBeNull();
- expect(result.learnings).toEqual([]);
- expect(result.worklog).toEqual([]);
- expect(result.rawSections.size).toBe(0);
- });
-
- it('should handle input with only a title section', () => {
- const result = parser.parse(TITLE_ONLY_MD);
-
- expect(result.title).toBe('Quick Bug Fix Session');
- expect(result.currentState).toBeNull();
- expect(result.taskSpec).toBeNull();
- expect(result.filesAndFunctions).toEqual([]);
- expect(result.workflow).toEqual([]);
- expect(result.errorsAndCorrections).toEqual([]);
- expect(result.codebaseDoc).toBeNull();
- expect(result.learnings).toEqual([]);
- expect(result.worklog).toEqual([]);
- });
-
- it('should handle malformed/partial sections gracefully', () => {
- const result = parser.parse(MALFORMED_MD);
-
- expect(result.title).toBe('Some title here');
- // Current State has no italic description, just raw content
- expect(result.currentState).toContain('Content without italic description');
- });
-
- it('should preserve unknown sections in rawSections', () => {
- const result = parser.parse(MALFORMED_MD);
-
- expect(result.rawSections.has('Unknown Custom Section')).toBe(true);
- expect(result.rawSections.get('Unknown Custom Section')).toContain(
- 'This section is not in the standard list'
- );
- expect(result.rawSections.has('Another Unknown')).toBe(true);
- expect(result.rawSections.get('Another Unknown')).toContain('More custom content');
- });
-
- it('should handle duplicate section headers by using the last occurrence', () => {
- const md = `# Session Title
-
-First Title
-
-# Session Title
-
-Second Title
-`;
- const result = parser.parse(md);
- // Last occurrence wins
- expect(result.title).toBe('Second Title');
- });
-
- it('should handle sections with only italic descriptions as empty', () => {
- const result = parser.parse(EMPTY_SECTIONS_MD);
-
- expect(result.title).toBe('Database Migration Debugging');
- expect(result.learnings).toEqual([]);
- expect(result.worklog).toEqual([]);
- });
-
- it('should handle content with Windows-style line endings (CRLF)', () => {
- const md = '# Session Title\r\n\r\nWindows Title\r\n\r\n# Current State\r\n\r\nSome state\r\n';
- const result = parser.parse(md);
-
- expect(result.title).toBe('Windows Title');
- expect(result.currentState).toContain('Some state');
- });
-
- it('should handle sections with no blank line after the header', () => {
- const md = `# Session Title
-Immediate Content Title
-`;
- const result = parser.parse(md);
- expect(result.title).toBe('Immediate Content Title');
- });
- });
-
- describe('Text content sections', () => {
- it('should parse Current State as trimmed text', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.currentState).toBe(
- 'Working on the parser implementation. Tests are written and passing.\nNeed to integrate with the ingestion pipeline next.'
- );
- });
-
- it('should parse Task specification as trimmed text', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.taskSpec).toContain('Build a SessionMemoryParser');
- expect(result.taskSpec).toContain('KnowledgeGraph');
- });
-
- it('should parse Codebase and System Documentation as trimmed text', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
- expect(result.codebaseDoc).toContain('SQLite-based KnowledgeGraph');
- expect(result.codebaseDoc).toContain('summary.md');
- });
-
- it('should return null for missing text sections', () => {
- const result = parser.parse(TITLE_ONLY_MD);
- expect(result.currentState).toBeNull();
- expect(result.taskSpec).toBeNull();
- expect(result.codebaseDoc).toBeNull();
- });
- });
-
- describe('rawSections Map', () => {
- it('should contain known sections in rawSections too', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
-
- // rawSections should include ALL sections' raw content
- expect(result.rawSections.has('Session Title')).toBe(true);
- expect(result.rawSections.has('Current State')).toBe(true);
- expect(result.rawSections.has('Learnings')).toBe(true);
- });
-
- it('should store raw unprocessed content (including italic lines) in rawSections', () => {
- const result = parser.parse(COMPLETE_SUMMARY_MD);
-
- // rawSections keeps the original content before filtering
- const rawCurrentState = result.rawSections.get('Current State');
- expect(rawCurrentState).toContain('_What is the current state');
- });
- });
-});
diff --git a/tests/integrations/session-memory/SessionMemoryPipeline.test.ts b/tests/integrations/session-memory/SessionMemoryPipeline.test.ts
deleted file mode 100644
index 61063824..00000000
--- a/tests/integrations/session-memory/SessionMemoryPipeline.test.ts
+++ /dev/null
@@ -1,445 +0,0 @@
-/**
- * SessionMemoryPipeline Test Suite
- *
- * TDD tests for the pipeline orchestrator that wires together
- * SessionMemoryWatcher, SessionMemoryParser, SessionMemoryIngester,
- * and SessionContextInjector into a cohesive processing pipeline.
- *
- * All sub-modules are mocked to isolate orchestration logic.
- */
-
-import { describe, it, expect, vi, beforeEach } from 'vitest';
-import { homedir } from 'os';
-import { join } from 'path';
-import type { SessionMemoryEvent, ParsedSessionMemory, IngestionResult } from '../../../src/integrations/session-memory/types.js';
-import type { InjectionContext } from '../../../src/integrations/session-memory/SessionContextInjector.js';
-
-// ─── Mocks ───────────────────────────────────────────────────────────
-
-// Use vi.hoisted() so mock references are available inside vi.mock factories
-// (vitest hoists vi.mock calls to the top of the file, before const declarations)
-const {
- mockWatcherStart,
- mockWatcherStop,
- mockParse,
- mockIngest,
- mockGenerateContext,
- captured,
-} = vi.hoisted(() => ({
- mockWatcherStart: vi.fn<() => Promise>().mockResolvedValue(undefined),
- mockWatcherStop: vi.fn<() => Promise>().mockResolvedValue(undefined),
- mockParse: vi.fn(),
- mockIngest: vi.fn(),
- mockGenerateContext: vi.fn(),
- captured: {
- onMemoryUpdate: null as ((event: SessionMemoryEvent) => Promise) | null,
- projectsDir: undefined as string | undefined,
- debounceMs: undefined as number | undefined,
- },
-}));
-
-// Mock logger to suppress output and allow spy assertions
-vi.mock('../../../src/utils/logger.js', () => ({
- logger: {
- info: vi.fn(),
- warn: vi.fn(),
- error: vi.fn(),
- debug: vi.fn(),
- },
-}));
-
-vi.mock('../../../src/integrations/session-memory/SessionMemoryWatcher.js', () => ({
- SessionMemoryWatcher: vi.fn().mockImplementation(function (this: any, config: {
- projectsDir: string;
- debounceMs?: number;
- onMemoryUpdate: (event: SessionMemoryEvent) => Promise;
- }) {
- captured.onMemoryUpdate = config.onMemoryUpdate;
- captured.projectsDir = config.projectsDir;
- captured.debounceMs = config.debounceMs;
- this.start = mockWatcherStart;
- this.stop = mockWatcherStop;
- this.isWatching = false;
- }),
-}));
-
-vi.mock('../../../src/integrations/session-memory/SessionMemoryParser.js', () => ({
- SessionMemoryParser: vi.fn().mockImplementation(function (this: any) {
- this.parse = mockParse;
- }),
-}));
-
-vi.mock('../../../src/integrations/session-memory/SessionMemoryIngester.js', () => ({
- SessionMemoryIngester: vi.fn().mockImplementation(function (this: any) {
- this.ingest = mockIngest;
- }),
-}));
-
-vi.mock('../../../src/integrations/session-memory/SessionContextInjector.js', () => ({
- SessionContextInjector: vi.fn().mockImplementation(function (this: any) {
- this.generateContext = mockGenerateContext;
- }),
-}));
-
-// Import after mocks are defined (hoisted by vitest)
-import { SessionMemoryPipeline } from '../../../src/integrations/session-memory/SessionMemoryPipeline.js';
-import { logger } from '../../../src/utils/logger.js';
-
-// ─── Test Helpers ────────────────────────────────────────────────────
-
-function createMockKnowledgeGraph() {
- return {
- createEntity: vi.fn(),
- createRelation: vi.fn(),
- getEntity: vi.fn(() => null),
- searchEntities: vi.fn(() => []),
- transaction: vi.fn((fn: () => unknown) => fn()),
- };
-}
-
-function createEvent(overrides: Partial = {}): SessionMemoryEvent {
- return {
- sessionId: 'abc12345-6789-0def-ghij-klmnopqrstuv',
- projectPath: '/Users/dev/my-project',
- sanitizedPath: '-Users-dev-my-project',
- summaryPath: '/home/.claude/projects/-Users-dev-my-project/abc12345/session-memory/summary.md',
- content: '# Session Title\n\n# Current State\nWorking on feature X',
- timestamp: new Date('2025-01-15T10:00:00Z'),
- changeType: 'created',
- ...overrides,
- };
-}
-
-function createParsedMemory(overrides: Partial = {}): ParsedSessionMemory {
- return {
- title: 'Test Session Title',
- currentState: 'Working on feature X',
- taskSpec: null,
- filesAndFunctions: [],
- workflow: [],
- errorsAndCorrections: [],
- codebaseDoc: null,
- learnings: [],
- worklog: [],
- rawSections: new Map(),
- ...overrides,
- };
-}
-
-// ─── Test Suite ──────────────────────────────────────────────────────
-
-describe('SessionMemoryPipeline', () => {
- let mockKG: ReturnType;
-
- beforeEach(() => {
- vi.clearAllMocks();
- captured.onMemoryUpdate = null;
- captured.projectsDir = undefined;
- captured.debounceMs = undefined;
-
- mockKG = createMockKnowledgeGraph();
- });
-
- // ─── Construction ───────────────────────────────────────────────
-
- describe('construction', () => {
- it('should create pipeline with default config (projectsDir defaults to ~/.claude/projects)', () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- expect(pipeline).toBeDefined();
- expect(pipeline.isRunning).toBe(false);
- // The watcher should receive the default projectsDir
- const expectedDir = join(homedir(), '.claude', 'projects');
- expect(captured.projectsDir).toBe(expectedDir);
- });
-
- it('should create pipeline with custom config', () => {
- const customDir = '/custom/projects/dir';
- const pipeline = new SessionMemoryPipeline(mockKG as any, {
- projectsDir: customDir,
- debounceMs: 5000,
- });
-
- expect(pipeline).toBeDefined();
- expect(captured.projectsDir).toBe(customDir);
- expect(captured.debounceMs).toBe(5000);
- });
- });
-
- // ─── start() / stop() lifecycle ─────────────────────────────────
-
- describe('start()', () => {
- it('should call watcher.start() and set isRunning to true', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- expect(pipeline.isRunning).toBe(false);
- await pipeline.start();
-
- expect(mockWatcherStart).toHaveBeenCalledTimes(1);
- expect(pipeline.isRunning).toBe(true);
- });
-
- it('should be idempotent (second start is a no-op)', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- await pipeline.start();
- await pipeline.start();
-
- // Watcher.start() should only be called once
- expect(mockWatcherStart).toHaveBeenCalledTimes(1);
- expect(pipeline.isRunning).toBe(true);
- });
- });
-
- describe('stop()', () => {
- it('should call watcher.stop() and set isRunning to false', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- await pipeline.start();
- expect(pipeline.isRunning).toBe(true);
-
- await pipeline.stop();
-
- expect(mockWatcherStop).toHaveBeenCalledTimes(1);
- expect(pipeline.isRunning).toBe(false);
- });
-
- it('should be idempotent (second stop is a no-op)', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- await pipeline.start();
- await pipeline.stop();
- await pipeline.stop();
-
- // Watcher.stop() should only be called once
- expect(mockWatcherStop).toHaveBeenCalledTimes(1);
- expect(pipeline.isRunning).toBe(false);
- });
-
- it('should be a no-op if pipeline was never started', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- await pipeline.stop();
-
- expect(mockWatcherStop).not.toHaveBeenCalled();
- expect(pipeline.isRunning).toBe(false);
- });
- });
-
- // ─── isRunning state tracking ───────────────────────────────────
-
- describe('isRunning state tracking', () => {
- it('should track the full lifecycle: false -> start -> true -> stop -> false', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- expect(pipeline.isRunning).toBe(false);
-
- await pipeline.start();
- expect(pipeline.isRunning).toBe(true);
-
- await pipeline.stop();
- expect(pipeline.isRunning).toBe(false);
- });
-
- it('should support restart: start -> stop -> start', async () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- await pipeline.start();
- expect(pipeline.isRunning).toBe(true);
-
- await pipeline.stop();
- expect(pipeline.isRunning).toBe(false);
-
- await pipeline.start();
- expect(pipeline.isRunning).toBe(true);
- expect(mockWatcherStart).toHaveBeenCalledTimes(2);
- expect(mockWatcherStop).toHaveBeenCalledTimes(1);
- });
- });
-
- // ─── End-to-end: handleMemoryUpdate pipeline ───────────────────
-
- describe('handleMemoryUpdate (end-to-end pipeline)', () => {
- it('should parse event content and ingest the result', async () => {
- // Create pipeline to register callback (side effect)
- new SessionMemoryPipeline(mockKG as any);
-
- const event = createEvent();
- const parsed = createParsedMemory();
- const ingestionResult: IngestionResult = {
- entitiesCreated: 3,
- entitiesUpdated: 0,
- entitiesSkipped: 0,
- relationsCreated: 2,
- sessionId: event.sessionId,
- errors: [],
- };
-
- mockParse.mockReturnValue(parsed);
- mockIngest.mockResolvedValue(ingestionResult);
-
- // Trigger the pipeline callback captured during construction
- expect(captured.onMemoryUpdate).not.toBeNull();
- await captured.onMemoryUpdate!(event);
-
- // Parser should receive the raw content from the event
- expect(mockParse).toHaveBeenCalledTimes(1);
- expect(mockParse).toHaveBeenCalledWith(event.content);
-
- // Ingester should receive the parsed result and the event
- expect(mockIngest).toHaveBeenCalledTimes(1);
- expect(mockIngest).toHaveBeenCalledWith(parsed, event);
-
- // Logger should record success
- expect(logger.info).toHaveBeenCalledWith(
- 'SessionMemoryPipeline: ingested session memory',
- expect.objectContaining({
- sessionId: event.sessionId,
- changeType: event.changeType,
- }),
- );
- });
-
- it('should handle updated change type correctly', async () => {
- // Create pipeline to register callback (side effect)
- new SessionMemoryPipeline(mockKG as any);
-
- const event = createEvent({ changeType: 'updated' });
- const parsed = createParsedMemory();
- const ingestionResult: IngestionResult = {
- entitiesCreated: 0,
- entitiesUpdated: 2,
- entitiesSkipped: 1,
- relationsCreated: 0,
- sessionId: event.sessionId,
- errors: [],
- };
-
- mockParse.mockReturnValue(parsed);
- mockIngest.mockResolvedValue(ingestionResult);
-
- await captured.onMemoryUpdate!(event);
-
- expect(logger.info).toHaveBeenCalledWith(
- 'SessionMemoryPipeline: ingested session memory',
- expect.objectContaining({
- sessionId: event.sessionId,
- changeType: 'updated',
- }),
- );
- });
- });
-
- // ─── Error handling ─────────────────────────────────────────────
-
- describe('error handling', () => {
- it('should catch and log parser errors without crashing', async () => {
- // Construction captures the onMemoryUpdate callback (pipeline itself unused)
- const _pipeline = new SessionMemoryPipeline(mockKG as any);
-
- const event = createEvent();
- mockParse.mockImplementation(() => {
- throw new Error('Parser exploded');
- });
-
- // Should NOT throw
- await expect(captured.onMemoryUpdate!(event)).resolves.toBeUndefined();
-
- // Logger should record the error
- expect(logger.error).toHaveBeenCalledWith(
- 'SessionMemoryPipeline: failed to process memory update',
- expect.objectContaining({
- sessionId: event.sessionId,
- error: 'Parser exploded',
- }),
- );
-
- // Ingester should NOT have been called
- expect(mockIngest).not.toHaveBeenCalled();
- });
-
- it('should catch and log ingester errors without crashing', async () => {
- const _pipeline = new SessionMemoryPipeline(mockKG as any);
-
- const event = createEvent();
- const parsed = createParsedMemory();
-
- mockParse.mockReturnValue(parsed);
- mockIngest.mockRejectedValue(new Error('Ingester failed'));
-
- // Should NOT throw
- await expect(captured.onMemoryUpdate!(event)).resolves.toBeUndefined();
-
- // Logger should record the error
- expect(logger.error).toHaveBeenCalledWith(
- 'SessionMemoryPipeline: failed to process memory update',
- expect.objectContaining({
- sessionId: event.sessionId,
- error: 'Ingester failed',
- }),
- );
- });
-
- it('should handle non-Error thrown values gracefully', async () => {
- const _pipeline = new SessionMemoryPipeline(mockKG as any);
-
- const event = createEvent();
- mockParse.mockImplementation(() => {
- throw 'string error';
- });
-
- await expect(captured.onMemoryUpdate!(event)).resolves.toBeUndefined();
-
- expect(logger.error).toHaveBeenCalledWith(
- 'SessionMemoryPipeline: failed to process memory update',
- expect.objectContaining({
- sessionId: event.sessionId,
- error: 'string error',
- }),
- );
- });
- });
-
- // ─── generateContext delegation ─────────────────────────────────
-
- describe('generateContext()', () => {
- it('should delegate to injector.generateContext() with no arguments', () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- mockGenerateContext.mockReturnValue('## Context Block');
-
- const result = pipeline.generateContext();
-
- expect(mockGenerateContext).toHaveBeenCalledTimes(1);
- expect(mockGenerateContext).toHaveBeenCalledWith(undefined);
- expect(result).toBe('## Context Block');
- });
-
- it('should delegate to injector.generateContext() with injection context', () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- const ctx: InjectionContext = {
- projectPath: '/Users/dev/my-project',
- gitBranch: 'feature/session-memory',
- };
-
- mockGenerateContext.mockReturnValue('## Branch Context');
-
- const result = pipeline.generateContext(ctx);
-
- expect(mockGenerateContext).toHaveBeenCalledTimes(1);
- expect(mockGenerateContext).toHaveBeenCalledWith(ctx);
- expect(result).toBe('## Branch Context');
- });
-
- it('should return empty string when injector returns empty', () => {
- const pipeline = new SessionMemoryPipeline(mockKG as any);
-
- mockGenerateContext.mockReturnValue('');
-
- const result = pipeline.generateContext();
-
- expect(result).toBe('');
- });
- });
-});
diff --git a/tests/integrations/session-memory/SessionMemoryWatcher.test.ts b/tests/integrations/session-memory/SessionMemoryWatcher.test.ts
deleted file mode 100644
index cae43093..00000000
--- a/tests/integrations/session-memory/SessionMemoryWatcher.test.ts
+++ /dev/null
@@ -1,587 +0,0 @@
-/**
- * SessionMemoryWatcher Test Suite
- *
- * TDD tests for watching Claude Code's native session memory files (summary.md)
- * and emitting SessionMemoryEvent objects on creation/update.
- *
- * All filesystem and chokidar interactions are mocked.
- */
-
-import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
-import { EventEmitter } from 'events';
-import type { SessionMemoryEvent } from '../../../src/integrations/session-memory/types.js';
-
-// ─── Mocks ───────────────────────────────────────────────────────────
-
-// Mock chokidar: create a fake FSWatcher backed by EventEmitter
-class MockFSWatcher extends EventEmitter {
- closed = false;
- async close(): Promise {
- this.closed = true;
- this.removeAllListeners();
- }
-}
-
-let mockWatcherInstance: MockFSWatcher;
-
-vi.mock('chokidar', () => {
- return {
- default: {
- watch: vi.fn(() => {
- mockWatcherInstance = new MockFSWatcher();
- return mockWatcherInstance;
- }),
- },
- };
-});
-
-// Mock fs/promises: readFile returns controlled content
-const mockReadFile = vi.fn<(path: string | URL) => Promise>();
-vi.mock('fs/promises', () => ({
- readFile: (...args: unknown[]) => mockReadFile(args[0] as string),
-}));
-
-// Mock logger to suppress output during tests
-vi.mock('../../../src/utils/logger.js', () => ({
- logger: {
- info: vi.fn(),
- warn: vi.fn(),
- error: vi.fn(),
- debug: vi.fn(),
- },
-}));
-
-// ─── Helpers ─────────────────────────────────────────────────────────
-
-const PROJECTS_DIR = '/home/user/.claude/projects';
-
-/** Build a valid summary.md path from parts */
-function buildSummaryPath(
- sanitizedPath: string,
- sessionId: string,
-): string {
- return `${PROJECTS_DIR}/${sanitizedPath}/${sessionId}/session-memory/summary.md`;
-}
-
-/** Create a default WatcherConfig for tests */
-function createTestConfig(
- overrides: Partial<{
- projectsDir: string;
- debounceMs: number;
- onMemoryUpdate: (event: SessionMemoryEvent) => Promise;
- }> = {},
-) {
- return {
- projectsDir: PROJECTS_DIR,
- debounceMs: 100, // Short debounce for fast tests
- onMemoryUpdate: vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined),
- ...overrides,
- };
-}
-
-// ─── Tests ───────────────────────────────────────────────────────────
-
-describe('SessionMemoryWatcher', () => {
- // Use fake timers for debounce testing
- beforeEach(() => {
- vi.useFakeTimers();
- vi.clearAllMocks();
- });
-
- afterEach(async () => {
- vi.useRealTimers();
- });
-
- // Lazy import to ensure mocks are registered before module loads
- async function importWatcher() {
- const mod = await import(
- '../../../src/integrations/session-memory/SessionMemoryWatcher.js'
- );
- return mod.SessionMemoryWatcher;
- }
-
- // ── 1. Construction ──────────────────────────────────────────────
-
- describe('Construction', () => {
- it('should create a watcher with valid config', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- expect(watcher).toBeDefined();
- expect(watcher.isWatching).toBe(false);
- });
-
- it('should use default debounceMs when not provided', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- delete (config as Record)['debounceMs'];
- const watcher = new SessionMemoryWatcher(config);
-
- expect(watcher).toBeDefined();
- });
- });
-
- // ── 2. Start/Stop Lifecycle ──────────────────────────────────────
-
- describe('Start/Stop Lifecycle', () => {
- it('should initialize chokidar watcher on start()', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const chokidar = await import('chokidar');
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- expect(chokidar.default.watch).toHaveBeenCalledWith(
- expect.stringContaining('**/session-memory/summary.md'),
- expect.objectContaining({
- persistent: true,
- ignoreInitial: true,
- }),
- );
- expect(watcher.isWatching).toBe(true);
- });
-
- it('should close chokidar watcher on stop()', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
- expect(watcher.isWatching).toBe(true);
-
- await watcher.stop();
- expect(watcher.isWatching).toBe(false);
- expect(mockWatcherInstance.closed).toBe(true);
- });
-
- it('should be safe to call stop() without start()', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- // Should not throw
- await watcher.stop();
- expect(watcher.isWatching).toBe(false);
- });
-
- it('should be safe to call start() multiple times', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const chokidar = await import('chokidar');
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
- const firstInstance = mockWatcherInstance;
-
- await watcher.start();
- // Should close the previous watcher and create a new one
- expect(firstInstance.closed).toBe(true);
- expect(chokidar.default.watch).toHaveBeenCalledTimes(2);
- expect(watcher.isWatching).toBe(true);
-
- await watcher.stop();
- });
- });
-
- // ── 3. File Path Parsing ─────────────────────────────────────────
-
- describe('File Path Parsing', () => {
- it('should extract sessionId and sanitizedPath from valid path', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- const filePath = buildSummaryPath('-Users-ktseng', 'abc12345-uuid');
- // Access private method via bracket notation for testing
- const result = (watcher as unknown as Record { sessionId: string; sanitizedPath: string }>)
- .parseFilePath(filePath);
-
- expect(result.sessionId).toBe('abc12345-uuid');
- expect(result.sanitizedPath).toBe('-Users-ktseng');
- });
-
- it('should handle complex sanitized paths', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- const filePath = buildSummaryPath(
- '-Users-ktseng-Developer-Projects-my-app',
- 'session-001-abcdef',
- );
- const result = (watcher as unknown as Record { sessionId: string; sanitizedPath: string }>)
- .parseFilePath(filePath);
-
- expect(result.sessionId).toBe('session-001-abcdef');
- expect(result.sanitizedPath).toBe('-Users-ktseng-Developer-Projects-my-app');
- });
- });
-
- // ── 4. Path Desanitization ───────────────────────────────────────
-
- describe('Path Desanitization', () => {
- it('should convert sanitized path back to filesystem path', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- const result = (watcher as unknown as Record string>)
- .desanitizePath('-Users-ktseng');
-
- expect(result).toBe('/Users/ktseng');
- });
-
- it('should handle deeper paths', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const config = createTestConfig();
- const watcher = new SessionMemoryWatcher(config);
-
- const result = (watcher as unknown as Record string>)
- .desanitizePath('-Users-ktseng-Developer-Projects');
-
- expect(result).toBe('/Users/ktseng/Developer/Projects');
- });
- });
-
- // ── 5. Content Hash Deduplication ────────────────────────────────
-
- describe('Content Hash Deduplication', () => {
- it('should emit only once for identical content', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'session-aaa');
- const fileContent = '# Session Title\nTest session content';
- mockReadFile.mockResolvedValue(fileContent);
-
- // First change
- mockWatcherInstance.emit('add', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Second change with same content
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(1);
-
- await watcher.stop();
- });
-
- it('should emit again when content changes', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'session-bbb');
-
- // First change
- mockReadFile.mockResolvedValue('Content v1');
- mockWatcherInstance.emit('add', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Second change with different content
- mockReadFile.mockResolvedValue('Content v2');
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(2);
-
- await watcher.stop();
- });
- });
-
- // ── 6. Debounce ──────────────────────────────────────────────────
-
- describe('Debounce', () => {
- it('should debounce multiple rapid changes into single callback', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate, debounceMs: 200 });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'session-ccc');
- mockReadFile.mockResolvedValue('Debounced content');
-
- // Fire 5 rapid changes
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(50);
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(50);
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(50);
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(50);
- mockWatcherInstance.emit('change', filePath);
-
- // Not enough time yet - should not have fired
- expect(onMemoryUpdate).not.toHaveBeenCalled();
-
- // Advance past debounce threshold
- await vi.advanceTimersByTimeAsync(250);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(1);
-
- await watcher.stop();
- });
-
- it('should debounce per-file independently', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate, debounceMs: 200 });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const file1 = buildSummaryPath('-Users-ktseng', 'session-111');
- const file2 = buildSummaryPath('-Users-ktseng', 'session-222');
-
- // Different content per file to avoid dedup
- mockReadFile.mockImplementation(async (path: string | URL) => {
- const pathStr = typeof path === 'string' ? path : path.toString();
- if (pathStr.includes('session-111')) return 'File 1 content';
- return 'File 2 content';
- });
-
- // Fire changes on both files
- mockWatcherInstance.emit('add', file1);
- mockWatcherInstance.emit('add', file2);
-
- // Advance past debounce
- await vi.advanceTimersByTimeAsync(250);
-
- // Both should fire
- expect(onMemoryUpdate).toHaveBeenCalledTimes(2);
-
- await watcher.stop();
- });
- });
-
- // ── 7. Event Emission ────────────────────────────────────────────
-
- describe('Event Emission', () => {
- it('should emit correct SessionMemoryEvent structure on add', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'session-event-test');
- const content = '# Session Title\nEvent test content';
- mockReadFile.mockResolvedValue(content);
-
- mockWatcherInstance.emit('add', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(1);
-
- const event = onMemoryUpdate.mock.calls[0][0];
- expect(event).toEqual(
- expect.objectContaining({
- sessionId: 'session-event-test',
- sanitizedPath: '-Users-ktseng',
- summaryPath: filePath,
- content,
- changeType: 'created',
- }),
- );
- // projectPath is the desanitized path
- expect(event.projectPath).toBe('/Users/ktseng');
- // timestamp should be a Date
- expect(event.timestamp).toBeInstanceOf(Date);
-
- await watcher.stop();
- });
- });
-
- // ── 8. Change Type Detection ─────────────────────────────────────
-
- describe('Change Type Detection', () => {
- it('should detect "created" for new files (add event)', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'new-session');
- mockReadFile.mockResolvedValue('New file content');
-
- mockWatcherInstance.emit('add', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate.mock.calls[0][0].changeType).toBe('created');
-
- await watcher.stop();
- });
-
- it('should detect "updated" for modified files (change event)', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'existing-session');
-
- // First emit as add (to set up the content hash)
- mockReadFile.mockResolvedValue('Original content');
- mockWatcherInstance.emit('add', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Then emit as change with different content
- mockReadFile.mockResolvedValue('Updated content');
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(2);
- expect(onMemoryUpdate.mock.calls[1][0].changeType).toBe('updated');
-
- await watcher.stop();
- });
- });
-
- // ── 9. Error Handling ────────────────────────────────────────────
-
- describe('Error Handling', () => {
- it('should not crash when readFile fails', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'error-session');
- mockReadFile.mockRejectedValue(new Error('ENOENT: no such file'));
-
- // Should not throw
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Callback should NOT have been called
- expect(onMemoryUpdate).not.toHaveBeenCalled();
- // Watcher should still be running
- expect(watcher.isWatching).toBe(true);
-
- await watcher.stop();
- });
-
- it('should not crash when onMemoryUpdate callback throws', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockRejectedValue(
- new Error('Callback error'),
- );
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const filePath = buildSummaryPath('-Users-ktseng', 'callback-error-session');
- mockReadFile.mockResolvedValue('Some content');
-
- // Should not throw
- mockWatcherInstance.emit('change', filePath);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Callback was called but threw
- expect(onMemoryUpdate).toHaveBeenCalledTimes(1);
- // Watcher should still be running
- expect(watcher.isWatching).toBe(true);
-
- await watcher.stop();
- });
- });
-
- // ── 10. Multiple Files ───────────────────────────────────────────
-
- describe('Multiple Files', () => {
- it('should track different summary.md files independently', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const file1 = buildSummaryPath('-Users-ktseng', 'session-alpha');
- const file2 = buildSummaryPath('-Users-ktseng', 'session-beta');
- const file3 = buildSummaryPath('-Users-other', 'session-gamma');
-
- mockReadFile.mockImplementation(async (path: string | URL) => {
- const pathStr = typeof path === 'string' ? path : path.toString();
- if (pathStr.includes('alpha')) return 'Alpha content';
- if (pathStr.includes('beta')) return 'Beta content';
- return 'Gamma content';
- });
-
- // Trigger all three files
- mockWatcherInstance.emit('add', file1);
- mockWatcherInstance.emit('add', file2);
- mockWatcherInstance.emit('add', file3);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(3);
-
- // Verify each event has correct sessionId
- const sessionIds = onMemoryUpdate.mock.calls.map(
- (call) => call[0].sessionId,
- );
- expect(sessionIds).toContain('session-alpha');
- expect(sessionIds).toContain('session-beta');
- expect(sessionIds).toContain('session-gamma');
-
- await watcher.stop();
- });
-
- it('should dedup per file, not globally', async () => {
- const SessionMemoryWatcher = await importWatcher();
- const onMemoryUpdate = vi.fn<(event: SessionMemoryEvent) => Promise>().mockResolvedValue(undefined);
- const config = createTestConfig({ onMemoryUpdate });
- const watcher = new SessionMemoryWatcher(config);
-
- await watcher.start();
-
- const file1 = buildSummaryPath('-Users-ktseng', 'session-x');
- const file2 = buildSummaryPath('-Users-ktseng', 'session-y');
-
- // Both files have the same content
- mockReadFile.mockResolvedValue('Identical content');
-
- // Both should still emit because they are different files
- mockWatcherInstance.emit('add', file1);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- mockWatcherInstance.emit('add', file2);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- expect(onMemoryUpdate).toHaveBeenCalledTimes(2);
-
- // Now re-emit same files with same content - should be deduped per file
- mockWatcherInstance.emit('change', file1);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- mockWatcherInstance.emit('change', file2);
- await vi.advanceTimersByTimeAsync(config.debounceMs + 50);
-
- // Still only 2 calls total (deduped per file)
- expect(onMemoryUpdate).toHaveBeenCalledTimes(2);
-
- await watcher.stop();
- });
- });
-});
diff --git a/tests/knowledge-graph.test.ts b/tests/knowledge-graph.test.ts
new file mode 100644
index 00000000..e0d2734a
--- /dev/null
+++ b/tests/knowledge-graph.test.ts
@@ -0,0 +1,294 @@
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import { openDatabase, closeDatabase } from '../src/db.js';
+import { KnowledgeGraph } from '../src/knowledge-graph.js';
+import type { CreateEntityInput } from '../src/knowledge-graph.js';
+import Database from 'better-sqlite3';
+import fs from 'fs';
+import path from 'path';
+import os from 'os';
+
+describe('Feature: Knowledge Graph', () => {
+ let testDir: string;
+ let testDbPath: string;
+ let db: Database.Database;
+ let kg: KnowledgeGraph;
+
+ beforeEach(() => {
+ testDir = path.join(
+ os.tmpdir(),
+ `memesh-kg-test-${Date.now()}-${Math.random().toString(36).slice(2)}`
+ );
+ fs.mkdirSync(testDir, { recursive: true });
+ testDbPath = path.join(testDir, 'test.db');
+ db = openDatabase(testDbPath);
+ kg = new KnowledgeGraph(db);
+ });
+
+ afterEach(() => {
+ try {
+ closeDatabase();
+ } catch {}
+ fs.rmSync(testDir, { recursive: true, force: true });
+ });
+
+ describe('Remember (Create)', () => {
+ it('should create a new entity with created_at', () => {
+ const id = kg.createEntity('TypeScript', 'language');
+ expect(id).toBeGreaterThan(0);
+
+ const entity = kg.getEntity('TypeScript');
+ expect(entity).not.toBeNull();
+ expect(entity!.name).toBe('TypeScript');
+ expect(entity!.type).toBe('language');
+ expect(entity!.created_at).toBeDefined();
+ });
+
+ it('should create entity with observations', () => {
+ kg.createEntity('TypeScript', 'language', {
+ observations: ['Superset of JavaScript', 'Has static typing'],
+ });
+
+ const entity = kg.getEntity('TypeScript');
+ expect(entity!.observations).toHaveLength(2);
+ expect(entity!.observations).toContain('Superset of JavaScript');
+ expect(entity!.observations).toContain('Has static typing');
+ });
+
+ it('should create entity with tags', () => {
+ kg.createEntity('TypeScript', 'language', {
+ tags: ['programming', 'frontend'],
+ });
+
+ const entity = kg.getEntity('TypeScript');
+ expect(entity!.tags).toHaveLength(2);
+ expect(entity!.tags).toContain('programming');
+ expect(entity!.tags).toContain('frontend');
+ });
+
+ it('should create entity with relations that are queryable', () => {
+ kg.createEntity('TypeScript', 'language');
+ kg.createEntity('JavaScript', 'language');
+ kg.createRelation('TypeScript', 'JavaScript', 'extends');
+
+ const entity = kg.getEntity('TypeScript');
+ expect(entity!.relations).toBeDefined();
+ expect(entity!.relations).toHaveLength(1);
+ expect(entity!.relations![0]).toEqual({
+ from: 'TypeScript',
+ to: 'JavaScript',
+ type: 'extends',
+ metadata: undefined,
+ });
+ });
+
+ it('should append observations on duplicate entity (upsert)', () => {
+ kg.createEntity('TypeScript', 'language', {
+ observations: ['First observation'],
+ });
+ kg.createEntity('TypeScript', 'language', {
+ observations: ['Second observation'],
+ });
+
+ const entity = kg.getEntity('TypeScript');
+ expect(entity!.observations).toHaveLength(2);
+ expect(entity!.observations).toContain('First observation');
+ expect(entity!.observations).toContain('Second observation');
+ });
+
+ it('should batch create all entities in single transaction', () => {
+ const entities: CreateEntityInput[] = [
+ {
+ name: 'Entity1',
+ type: 'test',
+ observations: ['obs1'],
+ tags: ['tag1'],
+ },
+ {
+ name: 'Entity2',
+ type: 'test',
+ observations: ['obs2'],
+ tags: ['tag2'],
+ },
+ {
+ name: 'Entity3',
+ type: 'test',
+ observations: ['obs3'],
+ },
+ ];
+
+ kg.createEntitiesBatch(entities);
+
+ expect(kg.getEntity('Entity1')).not.toBeNull();
+ expect(kg.getEntity('Entity2')).not.toBeNull();
+ expect(kg.getEntity('Entity3')).not.toBeNull();
+ expect(kg.getEntity('Entity1')!.observations).toEqual(['obs1']);
+ expect(kg.getEntity('Entity2')!.tags).toEqual(['tag2']);
+ });
+ });
+
+ describe('Recall (Search)', () => {
+ it('should search by keyword via FTS5 and find entity by observation content', () => {
+ kg.createEntity('React', 'framework', {
+ observations: ['A library for building user interfaces'],
+ });
+ kg.createEntity('Vue', 'framework', {
+ observations: ['A progressive JavaScript framework'],
+ });
+
+ const results = kg.search('interfaces');
+ expect(results).toHaveLength(1);
+ expect(results[0].name).toBe('React');
+ });
+
+ it('should filter search results by tag', () => {
+ kg.createEntity('React', 'framework', {
+ observations: ['Uses virtual DOM'],
+ tags: ['frontend'],
+ });
+ kg.createEntity('Express', 'framework', {
+ observations: ['Uses middleware pattern with virtual routing'],
+ tags: ['backend'],
+ });
+
+ const results = kg.search('virtual', { tag: 'frontend' });
+ expect(results).toHaveLength(1);
+ expect(results[0].name).toBe('React');
+ });
+
+ it('should return related entities via getEntity', () => {
+ kg.createEntity('React', 'framework', {
+ observations: ['Component-based UI library'],
+ });
+ kg.createEntity('Redux', 'library', {
+ observations: ['State management for React'],
+ });
+ kg.createRelation('React', 'Redux', 'uses');
+
+ const entity = kg.getEntity('React');
+ expect(entity!.relations).toBeDefined();
+ expect(entity!.relations![0].to).toBe('Redux');
+ });
+
+ it('should return empty array when no results match', () => {
+ kg.createEntity('React', 'framework', {
+ observations: ['A UI library'],
+ });
+
+ const results = kg.search('nonexistentterm');
+ expect(results).toEqual([]);
+ });
+
+ it('should list recent entities ordered by created_at DESC', () => {
+ kg.createEntity('First', 'test');
+ kg.createEntity('Second', 'test');
+ kg.createEntity('Third', 'test');
+
+ const recent = kg.listRecent(2);
+ expect(recent).toHaveLength(2);
+ // Most recent first
+ expect(recent[0].name).toBe('Third');
+ expect(recent[1].name).toBe('Second');
+ });
+
+ it('should return listRecent when search query is empty', () => {
+ kg.createEntity('Alpha', 'test');
+ kg.createEntity('Beta', 'test');
+
+ const results = kg.search('');
+ expect(results.length).toBeGreaterThanOrEqual(2);
+ // Should be ordered by created_at DESC
+ expect(results[0].name).toBe('Beta');
+ expect(results[1].name).toBe('Alpha');
+ });
+ });
+
+ describe('Forget (Delete)', () => {
+ it('should cascade delete entity, observations, relations, tags, and FTS', () => {
+ kg.createEntity('ToDelete', 'test', {
+ observations: ['some observation'],
+ tags: ['sometag'],
+ });
+ kg.createEntity('Related', 'test');
+ kg.createRelation('ToDelete', 'Related', 'links-to');
+
+ const result = kg.deleteEntity('ToDelete');
+ expect(result).toEqual({ deleted: true });
+
+ // Entity gone
+ expect(kg.getEntity('ToDelete')).toBeNull();
+
+ // Observations gone (via CASCADE)
+ const obs = db
+ .prepare(
+ "SELECT COUNT(*) as c FROM observations WHERE entity_id NOT IN (SELECT id FROM entities)"
+ )
+ .get() as any;
+ expect(obs.c).toBe(0);
+
+ // Tags gone (via CASCADE)
+ const tags = db
+ .prepare(
+ "SELECT COUNT(*) as c FROM tags WHERE entity_id NOT IN (SELECT id FROM entities)"
+ )
+ .get() as any;
+ expect(tags.c).toBe(0);
+
+ // FTS gone
+ const fts = db
+ .prepare("SELECT COUNT(*) as c FROM entities_fts WHERE name = 'ToDelete'")
+ .get() as any;
+ expect(fts.c).toBe(0);
+
+ // Relation gone (via CASCADE on from_entity_id)
+ const rels = db
+ .prepare('SELECT COUNT(*) as c FROM relations')
+ .get() as any;
+ expect(rels.c).toBe(0);
+ });
+
+ it('should return { deleted: false } for non-existent entity', () => {
+ const result = kg.deleteEntity('DoesNotExist');
+ expect(result).toEqual({ deleted: false });
+ });
+ });
+
+ describe('Edge Cases', () => {
+ it('should throw when creating relation with non-existent entity', () => {
+ kg.createEntity('Exists', 'test');
+ expect(() => kg.createRelation('Exists', 'Ghost', 'links')).toThrow(
+ 'Entity not found: Ghost'
+ );
+ expect(() => kg.createRelation('Ghost', 'Exists', 'links')).toThrow(
+ 'Entity not found: Ghost'
+ );
+ });
+
+ it('should handle entity with metadata', () => {
+ kg.createEntity('WithMeta', 'test', {
+ metadata: { version: '1.0', priority: 'high' },
+ });
+
+ const entity = kg.getEntity('WithMeta');
+ expect(entity!.metadata).toEqual({ version: '1.0', priority: 'high' });
+ });
+
+ it('should handle relation with metadata', () => {
+ kg.createEntity('A', 'test');
+ kg.createEntity('B', 'test');
+ kg.createRelation('A', 'B', 'depends-on', { weight: 0.8 });
+
+ const relations = kg.getRelations('A');
+ expect(relations[0].metadata).toEqual({ weight: 0.8 });
+ });
+
+ it('should search by entity name via FTS', () => {
+ kg.createEntity('UniqueProjectName', 'project', {
+ observations: ['A special project'],
+ });
+
+ const results = kg.search('UniqueProjectName');
+ expect(results).toHaveLength(1);
+ expect(results[0].name).toBe('UniqueProjectName');
+ });
+ });
+});
diff --git a/tests/mcp/BuddyCommands.test.ts b/tests/mcp/BuddyCommands.test.ts
deleted file mode 100644
index b5ad4c1b..00000000
--- a/tests/mcp/BuddyCommands.test.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import { BuddyCommands } from '../../src/mcp/BuddyCommands';
-
-describe('BuddyCommands', () => {
- it('should parse "buddy do" command', () => {
- const result = BuddyCommands.parse('buddy do setup authentication');
- expect(result.command).toBe('do');
- expect(result.args).toBe('setup authentication');
- });
-
- it('should parse "buddy remember" command', () => {
- const result = BuddyCommands.parse('buddy remember api design decisions');
- expect(result.command).toBe('remember');
- expect(result.args).toBe('api design decisions');
- });
-
- it('should handle command aliases', () => {
- const result1 = BuddyCommands.parse('buddy help-with setup auth');
- expect(result1.command).toBe('do'); // 'help-with' is alias for 'do'
-
- const result2 = BuddyCommands.parse('buddy recall some memory');
- expect(result2.command).toBe('remember'); // 'recall' is alias
- });
-
- it('should return help for unknown commands', () => {
- const result = BuddyCommands.parse('buddy unknown command');
- expect(result.command).toBe('help');
- });
-
- it('should handle commands without "buddy" prefix', () => {
- const result = BuddyCommands.parse('do setup authentication');
- expect(result.command).toBe('do');
- expect(result.args).toBe('setup authentication');
- });
-
- it('should preserve original input', () => {
- const result = BuddyCommands.parse('buddy do setup authentication');
- expect(result.originalInput).toBe('buddy do setup authentication');
- });
-});
diff --git a/tests/mcp/HumanInLoopUI.test.ts b/tests/mcp/HumanInLoopUI.test.ts
deleted file mode 100644
index 89460781..00000000
--- a/tests/mcp/HumanInLoopUI.test.ts
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * HumanInLoopUI Test
- *
- * Tests for human-in-the-loop confirmation UI formatting
- */
-
-import { describe, it, expect } from 'vitest';
-import { HumanInLoopUI, type ConfirmationRequest, type ConfirmationResponse } from '../../src/mcp/HumanInLoopUI.js';
-
-describe('HumanInLoopUI', () => {
- describe('formatConfirmationRequest', () => {
- it('should format confirmation request with recommendation', () => {
- const ui = new HumanInLoopUI();
-
- const request: ConfirmationRequest = {
- taskDescription: 'Review this code for security vulnerabilities',
- recommendedAgent: 'code-reviewer',
- confidence: 0.85,
- reasoning: [
- 'Task involves code review',
- 'Security focus detected',
- 'Code-reviewer specializes in security analysis',
- ],
- alternatives: [
- { agent: 'security-auditor', confidence: 0.72, reason: 'Specialized in security' },
- { agent: 'debugger', confidence: 0.45, reason: 'Can identify issues' },
- ],
- };
-
- const formatted = ui.formatConfirmationRequest(request);
-
- // Should contain recommendation
- expect(formatted).toContain('code-reviewer');
- expect(formatted).toContain('85%');
-
- // Should contain reasoning
- expect(formatted).toContain('code review');
- expect(formatted).toContain('security');
-
- // Should contain alternatives
- expect(formatted).toContain('security-auditor');
- expect(formatted).toContain('72%');
-
- // Should contain clear prompt
- expect(formatted).toContain('[y/n');
- expect(formatted).toMatch(/\[y\/n\/[0-9]-[0-9]\]/);
- });
-
- it('should format minimal request without alternatives', () => {
- const ui = new HumanInLoopUI();
-
- const request: ConfirmationRequest = {
- taskDescription: 'Simple task',
- recommendedAgent: 'general-agent',
- confidence: 0.95,
- reasoning: ['General purpose task'],
- alternatives: [],
- };
-
- const formatted = ui.formatConfirmationRequest(request);
-
- expect(formatted).toContain('general-agent');
- expect(formatted).toContain('95%');
- expect(formatted).toContain('General purpose task');
- expect(formatted).toContain('[y/n]');
- });
- });
-
- describe('parseUserResponse', () => {
- const request: ConfirmationRequest = {
- taskDescription: 'Test task',
- recommendedAgent: 'code-reviewer',
- confidence: 0.8,
- reasoning: ['Test reason'],
- alternatives: [
- { agent: 'security-auditor', confidence: 0.7, reason: 'Alternative 1' },
- { agent: 'debugger', confidence: 0.5, reason: 'Alternative 2' },
- ],
- };
-
- it('should parse "y" as accept recommendation', () => {
- const ui = new HumanInLoopUI();
-
- const response = ui.parseUserResponse('y', request);
-
- expect(response.accepted).toBe(true);
- expect(response.selectedAgent).toBe('code-reviewer');
- expect(response.wasOverridden).toBe(false);
- });
-
- it('should parse "n" as reject recommendation', () => {
- const ui = new HumanInLoopUI();
-
- const response = ui.parseUserResponse('n', request);
-
- expect(response.accepted).toBe(false);
- expect(response.selectedAgent).toBeUndefined();
- expect(response.wasOverridden).toBe(false);
- });
-
- it('should parse "1" as select first alternative', () => {
- const ui = new HumanInLoopUI();
-
- const response = ui.parseUserResponse('1', request);
-
- expect(response.accepted).toBe(true);
- expect(response.selectedAgent).toBe('security-auditor');
- expect(response.wasOverridden).toBe(true);
- });
-
- it('should parse "2" as select second alternative', () => {
- const ui = new HumanInLoopUI();
-
- const response = ui.parseUserResponse('2', request);
-
- expect(response.accepted).toBe(true);
- expect(response.selectedAgent).toBe('debugger');
- expect(response.wasOverridden).toBe(true);
- });
-
- it('should handle invalid input gracefully', () => {
- const ui = new HumanInLoopUI();
-
- const response = ui.parseUserResponse('invalid', request);
-
- expect(response.accepted).toBe(false);
- expect(response.selectedAgent).toBeUndefined();
- });
-
- it('should return properly typed ConfirmationResponse', () => {
- const ui = new HumanInLoopUI();
-
- const response: ConfirmationResponse = ui.parseUserResponse('y', request);
-
- // Verify the response conforms to ConfirmationResponse type
- expect(typeof response.accepted).toBe('boolean');
- expect('wasOverridden' in response).toBe(true);
- // selectedAgent is optional, so just verify the property exists or is undefined
- expect(response.selectedAgent === undefined || typeof response.selectedAgent === 'string').toBe(true);
- });
- });
-});
diff --git a/tests/mcp/server-tools.test.ts b/tests/mcp/server-tools.test.ts
deleted file mode 100644
index d5ac0086..00000000
--- a/tests/mcp/server-tools.test.ts
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * MCP Server Tools Test
- *
- * Tests that verify MCP server exposes correct tools
- */
-
-import { describe, it, expect } from 'vitest';
-import { AgentRegistry } from '../../src/core/AgentRegistry.js';
-
-describe('MCP Server Tools', () => {
- describe('AgentRegistry', () => {
- it('should register available agents', () => {
- const registry = new AgentRegistry();
- const allAgents = registry.getAllAgents();
-
- expect(allAgents.length).toBeGreaterThan(0);
- });
-
- it('should have agents from all categories', () => {
- const registry = new AgentRegistry();
- const allAgents = registry.getAllAgents();
-
- const categories = new Set(allAgents.map(a => a.category));
-
- expect(categories).toContain('development');
- expect(categories).toContain('analysis');
- expect(categories).toContain('operations');
- expect(categories).toContain('creative');
- expect(categories).toContain('management');
- expect(categories).toContain('engineering');
- });
-
- it('should include development-butler agent', () => {
- const registry = new AgentRegistry();
- const butler = registry.getAgent('development-butler');
-
- expect(butler).toBeDefined();
- expect(butler?.category).toBe('development');
- expect(butler?.description).toContain('workflow automation');
- });
- });
-});
diff --git a/tests/postinstall/fixtures/corrupted-marketplace.json b/tests/postinstall/fixtures/corrupted-marketplace.json
deleted file mode 100644
index 8ae5456d..00000000
--- a/tests/postinstall/fixtures/corrupted-marketplace.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "superpowers-marketplace": {
- "source": {
- "source": "github",
- "repo": "obra/superpowers-marketplace"
- },
- "installLocation": "/Users/test/.claude/plugins/marketplaces/superpowers-marketplace",
- "lastUpdated": "2025-12-23T22:35:57.229Z"
- },
- "invalid-entry": {
- "missing-fields": "this is corrupted"
- // Missing closing brace - invalid JSON
diff --git a/tests/postinstall/fixtures/valid-marketplace.json b/tests/postinstall/fixtures/valid-marketplace.json
deleted file mode 100644
index 3ff02681..00000000
--- a/tests/postinstall/fixtures/valid-marketplace.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "superpowers-marketplace": {
- "source": {
- "source": "github",
- "repo": "obra/superpowers-marketplace"
- },
- "installLocation": "/Users/test/.claude/plugins/marketplaces/superpowers-marketplace",
- "lastUpdated": "2025-12-23T22:35:57.229Z"
- },
- "claude-plugins-official": {
- "source": {
- "source": "github",
- "repo": "anthropics/claude-plugins-official"
- },
- "installLocation": "/Users/test/.claude/plugins/marketplaces/claude-plugins-official",
- "lastUpdated": "2026-02-11T16:20:18.907Z"
- }
-}
diff --git a/tests/postinstall/integration.test.ts b/tests/postinstall/integration.test.ts
deleted file mode 100644
index 098aabb5..00000000
--- a/tests/postinstall/integration.test.ts
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Integration Tests - End-to-End Scenarios
- *
- * Tests plugin installation logic (marketplace, symlink, plugin enablement).
- * MCP and hooks are handled by the Claude Code plugin system via .mcp.json and hooks/hooks.json.
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { TestEnvironment } from './setup';
-import {
- detectInstallMode,
- ensureMarketplaceRegistered,
- ensureSymlinkExists,
- ensurePluginEnabled,
- detectAndFixLegacyInstall
-} from '../../scripts/postinstall-lib';
-
-describe('Integration: Plugin Enablement', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('plugin-enable');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- it('should create settings.json and enable plugin', async () => {
- // Given: no settings.json
- expect(env.fileExists('settings.json')).toBe(false);
-
- // When: ensurePluginEnabled()
- await ensurePluginEnabled(env.claudeDir);
-
- // Then: settings.json created with memesh enabled
- expect(env.fileExists('settings.json')).toBe(true);
- const settings = JSON.parse(env.readFile('settings.json'));
- expect(settings.enabledPlugins['memesh@pcircle-ai']).toBe(true);
- });
-
- it('should preserve other plugins when enabling', async () => {
- // Given: settings.json with other plugins
- env.createFile('settings.json', JSON.stringify({
- enabledPlugins: {
- 'other-plugin@marketplace': true
- },
- otherSettings: 'value'
- }, null, 2));
-
- // When: ensurePluginEnabled()
- await ensurePluginEnabled(env.claudeDir);
-
- // Then: memesh enabled, others preserved
- const settings = JSON.parse(env.readFile('settings.json'));
- expect(settings.enabledPlugins['memesh@pcircle-ai']).toBe(true);
- expect(settings.enabledPlugins['other-plugin@marketplace']).toBe(true);
- expect(settings.otherSettings).toBe('value');
- });
-});
-
-describe('Integration: Legacy MCP Cleanup', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('mcp-cleanup');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- it('should clean up legacy MCP config during legacy fix', async () => {
- // Given: legacy MCP config with memesh entry
- env.createFile('mcp_settings.json', JSON.stringify({
- mcpServers: {
- memesh: {
- command: 'npx',
- args: ['-y', '@pcircle/memesh']
- },
- 'other-server': {
- command: 'node',
- args: ['/other/path']
- }
- }
- }, null, 2));
-
- // When: detectAndFixLegacyInstall()
- await detectAndFixLegacyInstall(env.installPath, env.claudeDir);
-
- // Then: memesh removed from mcp_settings, other servers preserved
- const config = JSON.parse(env.readFile('mcp_settings.json'));
- expect(config.mcpServers.memesh).toBeUndefined();
- expect(config.mcpServers['other-server']).toBeDefined();
- });
-
- it('should clean up legacy claude-code-buddy MCP entry', async () => {
- // Given: legacy MCP config with claude-code-buddy entry
- env.createFile('mcp_settings.json', JSON.stringify({
- mcpServers: {
- 'claude-code-buddy': {
- command: 'node',
- args: ['/old/path']
- }
- }
- }, null, 2));
-
- // When: detectAndFixLegacyInstall()
- await detectAndFixLegacyInstall(env.installPath, env.claudeDir);
-
- // Then: legacy entry removed
- const config = JSON.parse(env.readFile('mcp_settings.json'));
- expect(config.mcpServers['claude-code-buddy']).toBeUndefined();
- });
-});
-
-describe('Integration: Backward Compatibility', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('backward-compat');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- it('should detect v2.8.4 legacy installation', async () => {
- // Given: v2.8.4 setup (MCP but no marketplace)
- env.setupLegacyV284();
-
- // When: detectAndFixLegacyInstall()
- const result = await detectAndFixLegacyInstall(env.installPath, env.claudeDir);
-
- // Then: fixed
- expect(result).toBe('fixed');
-
- // Verify marketplace registered
- expect(env.fileExists('plugins/known_marketplaces.json')).toBe(true);
- const marketplaces = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(marketplaces['pcircle-ai']).toBeDefined();
- });
-
- it('should not modify correct v2.8.5 installation', async () => {
- // Given: correct v2.8.5 setup
- env.setupCorrectV285();
-
- // When: detectAndFixLegacyInstall()
- const result = await detectAndFixLegacyInstall(env.installPath, env.claudeDir);
-
- // Then: returns ok, no changes
- expect(result).toBe('ok');
- });
-});
-
-describe('Integration: Complete Installation Flow (E2E)', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('e2e');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- it('should complete fresh installation successfully', async () => {
- // Given: clean system (no ~/.claude setup)
- const installPath = env.installPath;
-
- // When: run complete installation (no MCP config — plugin system handles it)
- await ensureMarketplaceRegistered(installPath, env.claudeDir);
- await ensureSymlinkExists(installPath, env.marketplacesDir);
- await ensurePluginEnabled(env.claudeDir);
-
- // Then: all components configured correctly
- // 1. Marketplace registered
- const marketplaces = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(marketplaces['pcircle-ai']).toBeDefined();
-
- // 2. Symlink created
- const { existsSync } = await import('fs');
- expect(existsSync(`${env.marketplacesDir}/pcircle-ai`)).toBe(true);
-
- // 3. Plugin enabled
- const settings = JSON.parse(env.readFile('settings.json'));
- expect(settings.enabledPlugins['memesh@pcircle-ai']).toBe(true);
- });
-
- it('should upgrade from v2.8.4 successfully', async () => {
- // Given: v2.8.4 installation
- env.setupLegacyV284();
-
- // When: run upgrade (detectAndFix)
- await detectAndFixLegacyInstall(env.installPath, env.claudeDir);
-
- // Then: all issues fixed
- const marketplaces = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(marketplaces['pcircle-ai']).toBeDefined();
-
- const settings = JSON.parse(env.readFile('settings.json'));
- expect(settings.enabledPlugins['memesh@pcircle-ai']).toBe(true);
- });
-});
diff --git a/tests/postinstall/postinstall.test.ts b/tests/postinstall/postinstall.test.ts
deleted file mode 100644
index 6762ef3a..00000000
--- a/tests/postinstall/postinstall.test.ts
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Postinstall Tests - TDD Implementation
- *
- * Testing strategy: Red → Green → Refactor
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { mkdirSync, writeFileSync } from 'fs';
-import { join } from 'path';
-import { TestEnvironment } from './setup';
-import {
- detectInstallMode,
- getPluginInstallPath,
- ensureMarketplaceRegistered,
- ensureSymlinkExists
-} from '../../scripts/postinstall-lib';
-
-describe('Phase 1.1: Environment Detection', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('env-detection');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- describe('detectInstallMode', () => {
- it('should detect global install mode', () => {
- // Given: npm install from global path
- const installPath = '/Users/test/.nvm/versions/node/v22.22.0/lib/node_modules/@pcircle/memesh';
-
- // When: detectInstallMode(installPath)
- const mode = detectInstallMode(installPath);
-
- // Then: returns 'global'
- expect(mode).toBe('global');
- });
-
- it('should detect local dev mode', () => {
- // Given: running from project directory with src/ and package.json
- const installPath = env.testDir;
-
- // Create local dev indicators
- mkdirSync(join(installPath, 'src'), { recursive: true });
- writeFileSync(join(installPath, 'package.json'), '{}');
-
- // When: detectInstallMode(installPath)
- const mode = detectInstallMode(installPath);
-
- // Then: returns 'local'
- expect(mode).toBe('local');
- });
- });
-
- describe('getPluginInstallPath', () => {
- it('should get correct plugin path for global install', () => {
- // Given: global install mode with scriptDir
- const scriptDir = '/some/npm/path/node_modules/@pcircle/memesh/scripts';
-
- // When: getPluginInstallPath('global', scriptDir)
- const path = getPluginInstallPath('global', scriptDir);
-
- // Then: returns parent directory (removes /scripts)
- expect(path).toBe('/some/npm/path/node_modules/@pcircle/memesh');
- });
-
- it('should get correct plugin path for local dev', () => {
- // Given: local dev mode
- const cwd = process.cwd();
-
- // When: getPluginInstallPath('local')
- const path = getPluginInstallPath('local');
-
- // Then: returns project root (cwd)
- expect(path).toBe(cwd);
- });
-
- it('should fallback to cwd for global without scriptDir', () => {
- // Given: global mode but no scriptDir provided
- const cwd = process.cwd();
-
- // When: getPluginInstallPath('global')
- const path = getPluginInstallPath('global');
-
- // Then: returns cwd as fallback
- expect(path).toBe(cwd);
- });
- });
-});
-
-describe('Phase 1.2: Marketplace Registration', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('marketplace');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- describe('ensureMarketplaceRegistered', () => {
- it('should create known_marketplaces.json if not exists', async () => {
- // Given: no known_marketplaces.json
- expect(env.fileExists('plugins/known_marketplaces.json')).toBe(false);
-
- // When: ensureMarketplaceRegistered(installPath)
- await ensureMarketplaceRegistered(env.installPath, env.claudeDir);
-
- // Then: file created with pcircle-ai entry
- expect(env.fileExists('plugins/known_marketplaces.json')).toBe(true);
-
- const content = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(content['pcircle-ai']).toBeDefined();
- expect(content['pcircle-ai'].source.path).toBe(env.installPath);
- });
-
- it('should register new marketplace entry', async () => {
- // Given: known_marketplaces.json exists without pcircle-ai
- env.createFile('plugins/known_marketplaces.json', JSON.stringify({
- 'other-marketplace': {
- source: { source: 'github', repo: 'other/repo' },
- installLocation: '/test/path',
- lastUpdated: '2026-01-01T00:00:00.000Z'
- }
- }, null, 2));
-
- // When: ensureMarketplaceRegistered()
- await ensureMarketplaceRegistered(env.installPath, env.claudeDir);
-
- // Then: pcircle-ai entry added, other preserved
- const content = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(content['pcircle-ai']).toBeDefined();
- expect(content['other-marketplace']).toBeDefined();
- });
-
- it('should update existing marketplace entry', async () => {
- // Given: pcircle-ai already registered with old path
- const oldPath = '/old/path/to/memesh';
- env.createFile('plugins/known_marketplaces.json', JSON.stringify({
- 'pcircle-ai': {
- source: { source: 'local', path: oldPath },
- installLocation: '/old/symlink',
- lastUpdated: '2026-01-01T00:00:00.000Z'
- }
- }, null, 2));
-
- // When: ensureMarketplaceRegistered(newPath)
- const newPath = env.installPath;
- await ensureMarketplaceRegistered(newPath, env.claudeDir);
-
- // Then: installLocation updated, lastUpdated refreshed
- const content = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(content['pcircle-ai'].source.path).toBe(newPath);
- expect(content['pcircle-ai'].lastUpdated).not.toBe('2026-01-01T00:00:00.000Z');
- });
-
- it('should backup corrupted marketplace file', async () => {
- // Given: known_marketplaces.json with invalid JSON
- env.createFile('plugins/known_marketplaces.json', '{ invalid json }');
-
- // When: ensureMarketplaceRegistered()
- await ensureMarketplaceRegistered(env.installPath, env.claudeDir);
-
- // Then: new file with pcircle-ai entry created
- expect(env.fileExists('plugins/known_marketplaces.json')).toBe(true);
- const content = JSON.parse(env.readFile('plugins/known_marketplaces.json'));
- expect(content['pcircle-ai']).toBeDefined();
-
- // Note: Backup file check would require listing directory - simplified test
- });
- });
-});
-
-describe('Phase 1.3: Symlink Management', () => {
- let env: TestEnvironment;
-
- beforeEach(() => {
- env = new TestEnvironment('symlink');
- env.setup();
- });
-
- afterEach(() => {
- env.cleanup();
- });
-
- describe('ensureSymlinkExists', () => {
- it('should create symlink if not exists', async () => {
- // Given: no pcircle-ai symlink
- const symlinkPath = `${env.marketplacesDir}/pcircle-ai`;
- const { existsSync } = await import('fs');
- expect(existsSync(symlinkPath)).toBe(false);
-
- // When: ensureSymlinkExists(installPath)
- await ensureSymlinkExists(env.installPath, env.marketplacesDir);
-
- // Then: symlink created pointing to installPath
- expect(existsSync(symlinkPath)).toBe(true);
- });
-
- it('should update symlink if pointing to wrong location', async () => {
- // Given: symlink exists but points to old location
- const oldPath = join(env.testDir, 'old-path');
- mkdirSync(oldPath, { recursive: true });
- const symlinkPath = `${env.marketplacesDir}/pcircle-ai`;
-
- // Create old symlink
- const { symlinkSync } = await import('fs');
- symlinkSync(oldPath, symlinkPath, 'dir');
-
- // When: ensureSymlinkExists(newPath)
- await ensureSymlinkExists(env.installPath, env.marketplacesDir);
-
- // Then: symlink updated to newPath
- const { realpathSync, existsSync } = await import('fs');
- expect(existsSync(symlinkPath)).toBe(true);
- const target = realpathSync(symlinkPath);
- const expectedTarget = realpathSync(env.installPath);
- expect(target).toBe(expectedTarget);
- });
- });
-});
diff --git a/tests/postinstall/setup.ts b/tests/postinstall/setup.ts
deleted file mode 100644
index 6ba99a86..00000000
--- a/tests/postinstall/setup.ts
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Test utilities and setup for postinstall tests
- */
-
-import { existsSync, mkdirSync, mkdtempSync, rmSync, writeFileSync, readFileSync, symlinkSync, readlinkSync } from 'fs';
-import { join } from 'path';
-import { tmpdir } from 'os';
-
-/**
- * Test environment for isolated postinstall testing
- */
-export class TestEnvironment {
- public testDir: string;
- public claudeDir: string;
- public pluginsDir: string;
- public marketplacesDir: string;
- public installPath: string;
-
- constructor(name: string) {
- // Create isolated test directory with unpredictable name (secure temp)
- this.testDir = mkdtempSync(join(tmpdir(), `memesh-test-${name}-`));
- this.claudeDir = join(this.testDir, '.claude');
- this.pluginsDir = join(this.claudeDir, 'plugins');
- this.marketplacesDir = join(this.pluginsDir, 'marketplaces');
- this.installPath = join(this.testDir, 'node_modules', '@pcircle', 'memesh');
- }
-
- /**
- * Setup test environment with necessary directories
- */
- setup(): void {
- mkdirSync(this.testDir, { recursive: true });
- mkdirSync(this.claudeDir, { recursive: true });
- mkdirSync(this.pluginsDir, { recursive: true });
- mkdirSync(this.marketplacesDir, { recursive: true });
- mkdirSync(this.installPath, { recursive: true });
-
- // Create mock plugin structure
- this.createMockPlugin();
- }
-
- /**
- * Create mock plugin files in install path
- */
- private createMockPlugin(): void {
- // Create dist directory with server-bootstrap.js
- const distDir = join(this.installPath, 'dist', 'mcp');
- mkdirSync(distDir, { recursive: true });
- writeFileSync(
- join(distDir, 'server-bootstrap.js'),
- '#!/usr/bin/env node\nconsole.log("Mock MCP server");'
- );
-
- // Create plugin.json
- writeFileSync(
- join(this.installPath, 'plugin.json'),
- JSON.stringify({
- name: 'memesh',
- version: '2.8.5',
- description: 'Test plugin'
- }, null, 2)
- );
-
- // Create .mcp.json
- writeFileSync(
- join(this.installPath, '.mcp.json'),
- JSON.stringify({
- mcpServers: {
- memesh: {
- command: 'node',
- args: ['dist/mcp/server-bootstrap.js']
- }
- }
- }, null, 2)
- );
- }
-
- /**
- * Cleanup test environment
- */
- cleanup(): void {
- if (existsSync(this.testDir)) {
- rmSync(this.testDir, { recursive: true, force: true });
- }
- }
-
- /**
- * Create a file with content
- */
- createFile(relativePath: string, content: string): void {
- const fullPath = join(this.claudeDir, relativePath);
- const dir = join(fullPath, '..');
- if (!existsSync(dir)) {
- mkdirSync(dir, { recursive: true });
- }
- writeFileSync(fullPath, content, 'utf-8');
- }
-
- /**
- * Read file content
- */
- readFile(relativePath: string): string {
- return readFileSync(join(this.claudeDir, relativePath), 'utf-8');
- }
-
- /**
- * Check if file exists
- */
- fileExists(relativePath: string): boolean {
- return existsSync(join(this.claudeDir, relativePath));
- }
-
- /**
- * Create symlink
- */
- createSymlink(target: string, linkPath: string): void {
- symlinkSync(target, linkPath, 'dir');
- }
-
- /**
- * Setup v2.8.4 legacy installation (MCP configured, no marketplace)
- */
- setupLegacyV284(): void {
- // Only create mcp_settings.json (no marketplace registration)
- this.createFile('mcp_settings.json', JSON.stringify({
- mcpServers: {
- memesh: {
- command: 'node',
- args: [join(this.installPath, 'dist/mcp/server-bootstrap.js')]
- }
- }
- }, null, 2));
- }
-
- /**
- * Setup v2.8.3 legacy installation
- */
- setupLegacyV283(): void {
- // Similar to v2.8.4 but with older config
- this.setupLegacyV284();
- }
-
- /**
- * Setup correct v2.8.5 installation
- */
- setupCorrectV285(): void {
- // Marketplace registered
- this.createFile('plugins/known_marketplaces.json', JSON.stringify({
- 'pcircle-ai': {
- source: {
- source: 'local',
- path: this.installPath
- },
- installLocation: join(this.marketplacesDir, 'pcircle-ai'),
- lastUpdated: new Date().toISOString()
- }
- }, null, 2));
-
- // Symlink created
- this.createSymlink(this.installPath, join(this.marketplacesDir, 'pcircle-ai'));
-
- // Plugin enabled
- this.createFile('settings.json', JSON.stringify({
- enabledPlugins: {
- 'memesh@pcircle-ai': true
- }
- }, null, 2));
-
- // MCP configured
- this.createFile('mcp_settings.json', JSON.stringify({
- mcpServers: {
- memesh: {
- command: 'node',
- args: [join(this.installPath, 'dist/mcp/server-bootstrap.js')]
- }
- }
- }, null, 2));
- }
-}
-
-/**
- * Assert helpers
- */
-export const assert = {
- fileExists(path: string, message?: string): void {
- if (!existsSync(path)) {
- throw new Error(message || `Expected file to exist: ${path}`);
- }
- },
-
- fileNotExists(path: string, message?: string): void {
- if (existsSync(path)) {
- throw new Error(message || `Expected file not to exist: ${path}`);
- }
- },
-
- jsonContains(filePath: string, key: string, expectedValue?: unknown): void {
- const content = readFileSync(filePath, 'utf-8');
- const json = JSON.parse(content);
-
- if (!(key in json)) {
- throw new Error(`Expected JSON to contain key: ${key}`);
- }
-
- if (expectedValue !== undefined && json[key] !== expectedValue) {
- throw new Error(`Expected ${key} to be ${expectedValue}, got ${json[key]}`);
- }
- },
-
- symlinkPointsTo(linkPath: string, expectedTarget: string): void {
- if (!existsSync(linkPath)) {
- throw new Error(`Symlink does not exist: ${linkPath}`);
- }
-
- const actual = readlinkSync(linkPath);
- if (actual !== expectedTarget) {
- throw new Error(`Expected symlink to point to ${expectedTarget}, got ${actual}`);
- }
- }
-};
diff --git a/tests/setup/README.md b/tests/setup/README.md
deleted file mode 100644
index d9902ac3..00000000
--- a/tests/setup/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# E2E Testing Setup - Solution C (Hybrid Approach)
-
-## Overview
-
-This directory contains the global setup for E2E tests, implementing **Solution C (Hybrid Approach)** for resource management.
-
-## Phase 1: Global Setup ✅ COMPLETED
-
-**Date**: 2026-02-02
-**Status**: ✅ All tests passing
-
-### What's Implemented
-
-1. **Global Setup** (`global-setup.ts`)
- - Initializes GlobalResourcePool singleton before all tests
- - Configures resource limits (maxConcurrentE2E: 1)
- - Provides baseline protection against resource leaks
- - Automatic cleanup after all tests complete
-
-2. **Vitest Configuration** (`vitest.e2e.config.ts`)
- - Integrated global setup
- - Updated to Vitest 4 format (removed deprecated `poolOptions`)
- - Single worker execution (`maxWorkers: 1, isolate: false`)
- - Zero retries to prevent resource explosion
-
-3. **Verification Tests** (`tests/e2e/verify-global-setup.test.ts`)
- - Validates GlobalResourcePool initialization
- - Verifies configuration correctness
- - Tests status tracking and reporting
- - ✅ 4/4 tests passing
-
-### Benefits
-
-✅ **Zero Breaking Changes**: Existing tests work without modification
-✅ **Baseline Protection**: All E2E tests automatically benefit from resource management
-✅ **Vitest 4 Compatible**: No deprecation warnings
-✅ **Verified**: Comprehensive test coverage
-
-### Configuration Details
-
-```typescript
-// GlobalResourcePool configuration
-{
- maxConcurrentE2E: 1, // Only 1 E2E test at a time
- e2eWaitTimeout: 300000, // 5 minutes timeout
- staleCheckInterval: 60000, // Check for stale locks every minute
- staleLockThreshold: 1800000 // 30 minutes threshold
-}
-```
-
-```typescript
-// Vitest 4 configuration
-{
- pool: 'threads',
- maxWorkers: 1, // Single worker (was maxThreads in v3)
- isolate: false, // Shared context (was singleThread in v3)
- retry: 0, // No retries
-}
-```
-
-## Phase 2: Helper Functions (Next Step)
-
-**Status**: 🚧 Planned
-
-Will provide:
-- `withE2EResource()` - Automatic resource management wrapper
-- `acquireE2EResource()` / `releaseE2EResource()` - Manual control
-- `withE2EResources(n)` - Multi-resource coordination
-
-See Task #2 for details.
-
-## Phase 3: Migration (Optional)
-
-**Status**: 📋 Backlog
-
-Gradual migration of existing tests to use helper functions for explicit control.
-
-## Testing
-
-### Run Verification Test
-
-```bash
-# Using safe wrapper
-npm run test:e2e:safe tests/e2e/verify-global-setup.test.ts
-
-# Direct execution
-npx vitest run --config vitest.e2e.config.ts tests/e2e/verify-global-setup.test.ts
-```
-
-### Expected Output
-
-```
-✓ should have GlobalResourcePool initialized
-✓ should have correct resource pool configuration
-✓ should be able to generate resource pool report
-✓ should track resource pool status
-
-Test Files 1 passed (1)
- Tests 4 passed (4)
-```
-
-## Troubleshooting
-
-### Issue: "poolOptions was removed in Vitest 4"
-
-**Solution**: Configuration has been updated to Vitest 4 format. If you see this warning, ensure you're using the latest version of `vitest.e2e.config.ts`.
-
-### Issue: "close timed out after 10000ms"
-
-**Status**: Known issue, does not affect test results.
-**Cause**: Vitest's process cleanup behavior.
-**Impact**: None - all tests complete successfully.
-
-## Migration Guide
-
-See [Vitest Migration Guide](https://vitest.dev/guide/migration.html) for Vitest 3 → 4 changes.
-
-## Related Files
-
-- `vitest.e2e.config.ts` - E2E test configuration
-- `tests/e2e/verify-global-setup.test.ts` - Verification tests
-- `src/orchestrator/GlobalResourcePool.ts` - Resource pool implementation
-
-## Sources
-
-- [Vitest Migration Guide](https://vitest.dev/guide/migration.html)
-- [Vitest Configuration](https://vitest.dev/config/)
-- [Vitest Pool Configuration](https://vitest.dev/config/pool)
diff --git a/tests/tools.test.ts b/tests/tools.test.ts
new file mode 100644
index 00000000..34c37284
--- /dev/null
+++ b/tests/tools.test.ts
@@ -0,0 +1,188 @@
+import { describe, it, expect, beforeEach, afterEach } from 'vitest';
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
+import { openDatabase, closeDatabase } from '../src/db.js';
+import { handleTool } from '../src/mcp/tools.js';
+
+let tmpDir: string;
+let dbPath: string;
+
+beforeEach(() => {
+ tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'memesh-tools-'));
+ dbPath = path.join(tmpDir, 'test.db');
+ openDatabase(dbPath);
+});
+
+afterEach(() => {
+ closeDatabase();
+ fs.rmSync(tmpDir, { recursive: true, force: true });
+});
+
+// ── Remember ────────────────────────────────────────────────────────────
+
+describe('remember', () => {
+ it('stores an entity and returns confirmation', () => {
+ const result = handleTool('remember', {
+ name: 'auth-decision',
+ type: 'decision',
+ });
+
+ expect(result.isError).toBeUndefined();
+ const data = JSON.parse(result.content[0].text);
+ expect(data.stored).toBe(true);
+ expect(data.name).toBe('auth-decision');
+ expect(data.type).toBe('decision');
+ });
+
+ it('stores tags and relations', () => {
+ // Create target entity first so relation can be established
+ handleTool('remember', { name: 'jwt-pattern', type: 'pattern' });
+
+ const result = handleTool('remember', {
+ name: 'auth-decision',
+ type: 'decision',
+ tags: ['project:myapp', 'type:decision'],
+ relations: [{ to: 'jwt-pattern', type: 'implements' }],
+ });
+
+ expect(result.isError).toBeUndefined();
+ const data = JSON.parse(result.content[0].text);
+ expect(data.tags).toBe(2);
+ expect(data.relations).toBe(1);
+ });
+
+ it('returns validation error when name is missing', () => {
+ const result = handleTool('remember', { type: 'decision' });
+
+ expect(result.isError).toBe(true);
+ expect(result.content[0].text).toContain('name');
+ });
+
+ it('returns validation error when name is empty', () => {
+ const result = handleTool('remember', { name: '', type: 'decision' });
+
+ expect(result.isError).toBe(true);
+ });
+
+ it('stores observations that are searchable', () => {
+ handleTool('remember', {
+ name: 'jwt-lesson',
+ type: 'lesson',
+ observations: ['Use RS256 for JWT signing', 'Rotate keys quarterly'],
+ });
+
+ const result = handleTool('recall', { query: 'RS256' });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.length).toBe(1);
+ expect(data[0].name).toBe('jwt-lesson');
+ expect(data[0].observations).toContain('Use RS256 for JWT signing');
+ });
+
+ it('reports relation errors without failing overall', () => {
+ const result = handleTool('remember', {
+ name: 'auth-decision',
+ type: 'decision',
+ relations: [{ to: 'nonexistent-entity', type: 'related-to' }],
+ });
+
+ expect(result.isError).toBeUndefined();
+ const data = JSON.parse(result.content[0].text);
+ expect(data.stored).toBe(true);
+ expect(data.relations).toBe(0);
+ expect(data.relationErrors).toHaveLength(1);
+ });
+});
+
+// ── Recall ──────────────────────────────────────────────────────────────
+
+describe('recall', () => {
+ beforeEach(() => {
+ handleTool('remember', {
+ name: 'auth-pattern',
+ type: 'pattern',
+ observations: ['JWT tokens for stateless auth'],
+ tags: ['project:myapp'],
+ });
+ handleTool('remember', {
+ name: 'db-decision',
+ type: 'decision',
+ observations: ['Use PostgreSQL for persistence'],
+ tags: ['project:other'],
+ });
+ });
+
+ it('finds entities by query', () => {
+ const result = handleTool('recall', { query: 'auth' });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.length).toBeGreaterThanOrEqual(1);
+ expect(data.some((e: any) => e.name === 'auth-pattern')).toBe(true);
+ });
+
+ it('filters by tag', () => {
+ const result = handleTool('recall', {
+ query: 'auth',
+ tag: 'project:myapp',
+ });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.length).toBe(1);
+ expect(data[0].name).toBe('auth-pattern');
+ });
+
+ it('lists recent when no query provided', () => {
+ const result = handleTool('recall', {});
+ const data = JSON.parse(result.content[0].text);
+ expect(data.length).toBe(2);
+ });
+
+ it('returns empty array when nothing matches', () => {
+ const result = handleTool('recall', { query: 'nonexistent-xyz-123' });
+ const data = JSON.parse(result.content[0].text);
+ expect(data).toEqual([]);
+ });
+
+ it('respects limit parameter', () => {
+ const result = handleTool('recall', { limit: 1 });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.length).toBe(1);
+ });
+});
+
+// ── Forget ──────────────────────────────────────────────────────────────
+
+describe('forget', () => {
+ it('deletes an existing entity', () => {
+ handleTool('remember', { name: 'temp-note', type: 'note' });
+
+ const result = handleTool('forget', { name: 'temp-note' });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.deleted).toBe(true);
+
+ // Verify it's gone
+ const recall = handleTool('recall', { query: 'temp-note' });
+ const recallData = JSON.parse(recall.content[0].text);
+ expect(recallData).toEqual([]);
+ });
+
+ it('returns not-found message for non-existent entity', () => {
+ const result = handleTool('forget', { name: 'does-not-exist' });
+ const data = JSON.parse(result.content[0].text);
+ expect(data.deleted).toBe(false);
+ expect(data.message).toContain('not found');
+ });
+
+ it('returns validation error when name is missing', () => {
+ const result = handleTool('forget', {});
+ expect(result.isError).toBe(true);
+ });
+});
+
+// ── Unknown tool ────────────────────────────────────────────────────────
+
+describe('unknown tool', () => {
+ it('returns error for unknown tool name', () => {
+ const result = handleTool('nonexistent', {});
+ expect(result.isError).toBe(true);
+ expect(result.content[0].text).toContain('Unknown tool');
+ });
+});
diff --git a/tests/ui/AttributionManager.test.ts b/tests/ui/AttributionManager.test.ts
deleted file mode 100644
index d9836cb1..00000000
--- a/tests/ui/AttributionManager.test.ts
+++ /dev/null
@@ -1,114 +0,0 @@
-// tests/ui/AttributionManager.test.ts
-import { describe, it, expect, vi, beforeEach } from 'vitest';
-import { AttributionManager } from '../../src/ui/AttributionManager.js';
-import { UIEventBus } from '../../src/ui/UIEventBus.js';
-
-describe('AttributionManager', () => {
- let manager: AttributionManager;
- let eventBus: UIEventBus;
-
- beforeEach(() => {
- eventBus = UIEventBus.getInstance();
- eventBus.removeAllListeners();
- manager = new AttributionManager(eventBus);
- });
-
- it('should record success attribution', () => {
- const handler = vi.fn();
- eventBus.onAttribution(handler);
-
- manager.recordSuccess(
- ['bg-123'],
- 'Code review completed',
- { timeSaved: 15, tokensUsed: 5000 }
- );
-
- expect(handler).toHaveBeenCalledTimes(1);
- const attribution = handler.mock.calls[0][0];
- expect(attribution.type).toBe('success');
- expect(attribution.agentIds).toEqual(['bg-123']);
- expect(attribution.metadata?.timeSaved).toBe(15);
- });
-
- it('should record error attribution', () => {
- const handler = vi.fn();
- eventBus.onAttribution(handler);
-
- const error = new Error('Connection timeout');
- manager.recordError(['bg-456'], 'Test execution failed', error, true);
-
- expect(handler).toHaveBeenCalledTimes(1);
- const attribution = handler.mock.calls[0][0];
- expect(attribution.type).toBe('error');
- expect(attribution.metadata?.error?.message).toBe('Connection timeout');
- expect(attribution.metadata?.suggestGitHubIssue).toBe(true);
- });
-
- it('should generate GitHub issue suggestion', () => {
- const error = new Error('API timeout in BackgroundExecutor');
- const attribution = {
- id: 'attr-123',
- type: 'error' as const,
- timestamp: new Date(),
- agentIds: ['bg-789'],
- taskDescription: 'Running E2E tests',
- metadata: {
- error: {
- name: error.name,
- message: error.message,
- stack: error.stack,
- },
- suggestGitHubIssue: true,
- },
- };
-
- const suggestion = manager.generateIssueSuggestion(attribution, error);
-
- expect(suggestion.title).toContain('Error');
- expect(suggestion.body).toContain('API timeout');
- expect(suggestion.labels).toContain('bug');
- expect(suggestion.labels).toContain('claude-code-buddy');
- });
-
- it('should sanitize sensitive data from issue body', () => {
- const error = new Error('Failed to connect');
- error.stack = `Error: Failed to connect
- at /Users/username/project/api.ts:42
- Token: sk-abc123xyz456
- API_KEY=secret_key_here`;
-
- const attribution = {
- id: 'attr-123',
- type: 'error' as const,
- timestamp: new Date(),
- agentIds: ['bg-789'],
- taskDescription: 'API call',
- metadata: {
- error: {
- name: error.name,
- message: error.message,
- stack: error.stack,
- },
- },
- };
-
- const suggestion = manager.generateIssueSuggestion(attribution, error);
-
- // Should not contain user paths
- expect(suggestion.body).not.toContain('/Users/username');
- // Should not contain API keys/tokens
- expect(suggestion.body).not.toContain('sk-abc123xyz456');
- expect(suggestion.body).not.toContain('secret_key_here');
- });
-
- it('should retrieve recent attributions', () => {
- manager.recordSuccess(['bg-1'], 'Task 1', { timeSaved: 5 });
- manager.recordSuccess(['bg-2'], 'Task 2', { timeSaved: 10 });
- manager.recordError(['bg-3'], 'Task 3', new Error('Failed'), false);
-
- const recent = manager.getRecentAttributions(2);
- expect(recent).toHaveLength(2);
- expect(recent[0].taskDescription).toBe('Task 3'); // Most recent first
- expect(recent[1].taskDescription).toBe('Task 2');
- });
-});
diff --git a/tests/ui/Dashboard.test.ts b/tests/ui/Dashboard.test.ts
deleted file mode 100644
index 8ec3fdcc..00000000
--- a/tests/ui/Dashboard.test.ts
+++ /dev/null
@@ -1,68 +0,0 @@
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { Dashboard } from '../../src/ui/Dashboard.js';
-import { TestResourceMonitor } from '../helpers/TestResourceMonitor.js';
-import type { UIConfig } from '../../src/ui/types.js';
-
-describe('Dashboard', () => {
- let dashboard: Dashboard;
- let resourceMonitor: TestResourceMonitor;
-
- beforeEach(() => {
- resourceMonitor = new TestResourceMonitor();
- dashboard = new Dashboard(resourceMonitor);
- });
-
- afterEach(() => {
- dashboard.stop();
- });
-
- it('should start and stop dashboard', () => {
- dashboard.start();
- expect(dashboard.isRunning()).toBe(true);
-
- dashboard.stop();
- expect(dashboard.isRunning()).toBe(false);
- });
-
- it('should use custom UI config', () => {
- const customConfig: Partial = {
- updateInterval: 500,
- maxRecentAttributions: 10,
- colorEnabled: false,
- };
-
- const customDashboard = new Dashboard(resourceMonitor, customConfig);
- expect(customDashboard).toBeDefined();
- });
-
- it('should get current dashboard state', () => {
- const state = dashboard.getState();
-
- expect(state.resources).toBeDefined();
- expect(state.agents).toEqual([]);
- expect(state.recentAttributions).toEqual([]);
- expect(state.sessionMetrics).toBeDefined();
- });
-
- it('should update resource stats periodically', async () => {
- dashboard.start();
-
- const initialState = dashboard.getState();
- const initialCPU = initialState.resources.cpu.usage;
-
- // Verify initial CPU is a valid number
- expect(typeof initialCPU).toBe('number');
- expect(initialCPU).toBeGreaterThanOrEqual(0);
- expect(initialCPU).toBeLessThanOrEqual(100);
-
- // Wait for at least one update cycle
- await new Promise((resolve) => setTimeout(resolve, 300));
-
- const updatedState = dashboard.getState();
- expect(updatedState.resources).toBeDefined();
- // CPU should be a valid number (may or may not have changed)
- expect(typeof updatedState.resources.cpu.usage).toBe('number');
-
- dashboard.stop();
- });
-});
diff --git a/tests/ui/MetricsStore.test.ts b/tests/ui/MetricsStore.test.ts
deleted file mode 100644
index c23d2527..00000000
--- a/tests/ui/MetricsStore.test.ts
+++ /dev/null
@@ -1,184 +0,0 @@
-// tests/ui/MetricsStore.test.ts
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { MetricsStore } from '../../src/ui/MetricsStore.js';
-import type { AttributionMessage } from '../../src/ui/types.js';
-import { ValidationError } from '../../src/errors/index.js';
-import { existsSync, unlinkSync } from 'fs';
-import { homedir } from 'os';
-import { join } from 'path';
-
-describe('MetricsStore', () => {
- const testStorePath = join(homedir(), '.memesh-test-metrics-store.json');
- let store: MetricsStore;
-
- beforeEach(() => {
- // Clean up test file
- if (existsSync(testStorePath)) {
- unlinkSync(testStorePath);
- }
- store = new MetricsStore(testStorePath);
- });
-
- afterEach(() => {
- // Clean up test file
- if (existsSync(testStorePath)) {
- unlinkSync(testStorePath);
- }
- });
-
- it('should initialize with new session', () => {
- const metrics = store.getCurrentSessionMetrics();
- expect(metrics.sessionId).toBeDefined();
- expect(metrics.tasksCompleted).toBe(0);
- expect(metrics.tasksFailed).toBe(0);
- expect(metrics.totalTimeSaved).toBe(0);
- });
-
- it('should record successful attribution', () => {
- const attribution: AttributionMessage = {
- id: 'attr-1',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['bg-123'],
- taskDescription: 'Code review',
- metadata: {
- timeSaved: 15,
- tokensUsed: 5000,
- },
- };
-
- store.recordAttribution(attribution);
-
- const metrics = store.getCurrentSessionMetrics();
- expect(metrics.tasksCompleted).toBe(1);
- expect(metrics.totalTimeSaved).toBe(15);
- expect(metrics.totalTokensUsed).toBe(5000);
- });
-
- it('should record failed attribution', () => {
- const attribution: AttributionMessage = {
- id: 'attr-2',
- type: 'error',
- timestamp: new Date(),
- agentIds: ['bg-456'],
- taskDescription: 'Test execution',
- metadata: {
- error: {
- name: 'TestError',
- message: 'Timeout',
- },
- },
- };
-
- store.recordAttribution(attribution);
-
- const metrics = store.getCurrentSessionMetrics();
- expect(metrics.tasksFailed).toBe(1);
- expect(metrics.tasksCompleted).toBe(0);
- });
-
- it('should track agent usage breakdown', () => {
- const attr1: AttributionMessage = {
- id: 'attr-1',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['code-reviewer'],
- taskDescription: 'Review 1',
- };
-
- const attr2: AttributionMessage = {
- id: 'attr-2',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['code-reviewer'],
- taskDescription: 'Review 2',
- };
-
- const attr3: AttributionMessage = {
- id: 'attr-3',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['test-automator'],
- taskDescription: 'Test',
- };
-
- store.recordAttribution(attr1);
- store.recordAttribution(attr2);
- store.recordAttribution(attr3);
-
- const metrics = store.getCurrentSessionMetrics();
- expect(metrics.agentUsageBreakdown['code-reviewer']).toBe(2);
- expect(metrics.agentUsageBreakdown['test-automator']).toBe(1);
- });
-
- it('should persist and load metrics', async () => {
- const attribution: AttributionMessage = {
- id: 'attr-1',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['bg-123'],
- taskDescription: 'Task',
- metadata: { timeSaved: 10 },
- };
-
- store.recordAttribution(attribution);
- await store.persist();
-
- // Create new store instance (simulates restart)
- const newStore = new MetricsStore(testStorePath);
- await newStore.load();
-
- const metrics = newStore.getCurrentSessionMetrics();
- expect(metrics.tasksCompleted).toBe(1);
- expect(metrics.totalTimeSaved).toBe(10);
- });
-
- describe('path traversal protection', () => {
- it('should throw ValidationError when path is outside home directory', () => {
- expect(() => new MetricsStore('/etc/test-memesh-metrics.json')).toThrow(ValidationError);
- });
-
- it('should include path details in the ValidationError context', () => {
- let thrown: unknown;
- try {
- new MetricsStore('/etc/test-memesh-metrics.json');
- } catch (err) {
- thrown = err;
- }
-
- expect(thrown).toBeInstanceOf(ValidationError);
- const ve = thrown as ValidationError;
- expect(ve.message).toContain('home or data directory');
- expect(ve.context?.provided).toBe('/etc/test-memesh-metrics.json');
- });
-
- it('should throw ValidationError for path with .. traversal that escapes home', () => {
- const escapingPath = join(homedir(), '..', '..', 'etc', 'passwd');
- expect(() => new MetricsStore(escapingPath)).toThrow(ValidationError);
- });
-
- it('should accept a path within the home directory without throwing', () => {
- const validPath = join(homedir(), '.memesh-test-validation-check.json');
- expect(() => new MetricsStore(validPath)).not.toThrow();
- });
- });
-
- it('should generate daily summary report', async () => {
- const attr1: AttributionMessage = {
- id: 'attr-1',
- type: 'success',
- timestamp: new Date(),
- agentIds: ['code-reviewer'],
- taskDescription: 'Review',
- metadata: { timeSaved: 20 },
- };
-
- store.recordAttribution(attr1);
-
- const report = await store.generateDailyReport();
-
- expect(report).toContain('Daily Productivity Report');
- expect(report).toContain('**Tasks Completed:** 1');
- expect(report).toContain('**Time Saved:** 20 minutes');
- });
-});
diff --git a/tests/ui/ProgressRenderer.test.ts b/tests/ui/ProgressRenderer.test.ts
deleted file mode 100644
index bf24d102..00000000
--- a/tests/ui/ProgressRenderer.test.ts
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * ProgressRenderer Test Suite
- *
- * Tests for the ProgressRenderer terminal rendering component
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import { ProgressRenderer } from '../../src/ui/ProgressRenderer.js';
-import { DashboardState, DashboardConfig } from '../../src/ui/types.js';
-
-describe('ProgressRenderer', () => {
- let renderer: ProgressRenderer;
- const defaultConfig: DashboardConfig = {
- updateInterval: 100,
- maxRecentEvents: 10,
- showSpinner: true,
- showMetrics: true,
- showAttribution: true,
- };
-
- beforeEach(() => {
- renderer = new ProgressRenderer(defaultConfig);
- });
-
- afterEach(() => {
- if (renderer) {
- renderer.stop();
- }
- });
-
- describe('Initialization', () => {
- it('should create renderer with default config', () => {
- expect(renderer).toBeDefined();
- });
-
- it('should not be running initially', () => {
- expect(renderer.isRunning()).toBe(false);
- });
- });
-
- describe('Lifecycle', () => {
- it('should start rendering with callback', () => {
- const getState = vi.fn().mockReturnValue({
- activeAgents: new Map(),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 0,
- completedTasks: 0,
- failedTasks: 0,
- agentUsageCount: {},
- estimatedTimeSaved: 0,
- tokensUsed: 0,
- },
- });
-
- renderer.start(getState);
- expect(renderer.isRunning()).toBe(true);
- expect(getState).toHaveBeenCalled();
-
- renderer.stop();
- expect(renderer.isRunning()).toBe(false);
- });
-
- it('should stop rendering and clear interval', async () => {
- const getState = vi.fn().mockReturnValue({
- activeAgents: new Map(),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 0,
- completedTasks: 0,
- failedTasks: 0,
- agentUsageCount: {},
- estimatedTimeSaved: 0,
- tokensUsed: 0,
- },
- });
-
- renderer.start(getState);
- expect(renderer.isRunning()).toBe(true);
-
- await new Promise((resolve) => setTimeout(resolve, 50));
- renderer.stop();
- expect(renderer.isRunning()).toBe(false);
- });
- });
-
- describe('Throttling', () => {
- it('should throttle render calls to minimum interval', async () => {
- const getState = vi.fn().mockReturnValue({
- activeAgents: new Map(),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 0,
- completedTasks: 0,
- failedTasks: 0,
- agentUsageCount: {},
- estimatedTimeSaved: 0,
- tokensUsed: 0,
- },
- });
-
- renderer.start(getState);
-
- // Wait for some updates
- await new Promise((resolve) => setTimeout(resolve, 250));
- const callCount = getState.mock.calls.length;
- renderer.stop();
-
- // Should not call too frequently (100ms interval + 100ms throttle = ~200ms per call minimum)
- // In 250ms, should have ~1-2 calls, not 25 calls
- expect(callCount).toBeLessThan(5);
- expect(callCount).toBeGreaterThan(0);
- });
- });
-
- describe('Rendering', () => {
- it('should render dashboard with active agents', () => {
- const state: DashboardState = {
- activeAgents: new Map([
- [
- 'agent-1',
- {
- agentId: 'agent-1',
- agentType: 'code-reviewer',
- status: 'running',
- progress: 0.5,
- currentTask: 'Reviewing code',
- startTime: new Date(),
- },
- ],
- ]),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 10,
- completedTasks: 5,
- failedTasks: 1,
- agentUsageCount: { 'code-reviewer': 3 },
- estimatedTimeSaved: 1800,
- tokensUsed: 25000,
- },
- };
-
- const output = renderer['renderDashboard'](state);
-
- expect(output).toContain('code-reviewer');
- expect(output).toContain('Reviewing code');
- expect(output).toContain('50%');
- });
-
- it('should render metrics section', () => {
- const state: DashboardState = {
- activeAgents: new Map(),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 10,
- completedTasks: 7,
- failedTasks: 2,
- agentUsageCount: { 'debugger': 2, 'test-automator': 1 },
- estimatedTimeSaved: 3600,
- tokensUsed: 50000,
- },
- };
-
- const output = renderer['renderDashboard'](state);
-
- expect(output).toContain('Tasks');
- expect(output).toContain('7/10');
- expect(output).toContain('Time Saved');
- expect(output).toContain('1h');
- });
- });
-
- describe('Configuration', () => {
- it('should respect showMetrics config', () => {
- const configWithoutMetrics: DashboardConfig = {
- ...defaultConfig,
- showMetrics: false,
- };
- const rendererNoMetrics = new ProgressRenderer(configWithoutMetrics);
-
- const state: DashboardState = {
- activeAgents: new Map(),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 10,
- completedTasks: 5,
- failedTasks: 0,
- agentUsageCount: {},
- estimatedTimeSaved: 0,
- tokensUsed: 0,
- },
- };
-
- const output = rendererNoMetrics['renderDashboard'](state);
-
- // Should not contain metrics section
- expect(output).not.toContain('Time Saved');
-
- rendererNoMetrics.stop();
- });
- });
-});
diff --git a/tests/ui/ResponseFormatter.error-messages.test.ts b/tests/ui/ResponseFormatter.error-messages.test.ts
deleted file mode 100644
index 381b201c..00000000
--- a/tests/ui/ResponseFormatter.error-messages.test.ts
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * Error Messages - User-First Tests
- *
- * Tests for improved error message formatting with clear guidance
- * Task 9: Update Error Messages to Be User-First
- */
-
-import { describe, it, expect, beforeEach } from 'vitest';
-import { ResponseFormatter } from '../../src/ui/ResponseFormatter.js';
-import type { AgentResponse } from '../../src/ui/ResponseFormatter.js';
-
-describe('Error Messages - User-First', () => {
- let formatter: ResponseFormatter;
-
- beforeEach(() => {
- formatter = new ResponseFormatter();
- });
-
- describe('Configuration Errors', () => {
- it('should format missing configuration error with clear guidance', () => {
- const error = new Error('MCP server configuration is missing or invalid');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Execute development task',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should have clear error indicator
- expect(formatted).toContain('❌');
-
- // Should explain what happened
- expect(formatted).toMatch(/configuration|not configured|missing/i);
-
- // Should provide fix steps
- expect(formatted).toMatch(/fix steps?:/i);
- expect(formatted).toMatch(/1\./); // Numbered steps
- expect(formatted).toMatch(/2\./);
-
- // Should NOT show stack trace in normal mode
- expect(formatted).not.toContain('at Object');
- expect(formatted).not.toContain('at Module');
- });
-
- it('should provide specific guidance for MCP server issues', () => {
- const error = new Error('MCP server not found or not running');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Execute task',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should indicate service/integration issue
- expect(formatted).toMatch(/service|integration|connect/i);
-
- // Should provide troubleshooting steps
- expect(formatted).toMatch(/fix steps?:/i);
- });
- });
-
- describe('Validation Errors', () => {
- it('should format validation error with clear fix steps', () => {
- const error = new Error('Invalid input: taskDescription is required');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: '',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should indicate validation error
- expect(formatted).toMatch(/invalid|validation/i);
-
- // Should provide fix guidance
- expect(formatted).toMatch(/fix steps?:/i);
- expect(formatted).toMatch(/buddy-help/i); // Reference help command
-
- // Should NOT show technical details
- expect(formatted).not.toContain('ValidationError');
- expect(formatted).not.toContain('at validateInput');
- });
-
- it('should explain why validation failed', () => {
- const error = new Error('Invalid task priority: must be one of low, normal, high, urgent');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should show root cause
- expect(formatted).toMatch(/root cause/i);
- expect(formatted).toContain('must be one of');
-
- // Should provide examples or fix steps
- expect(formatted).toMatch(/fix steps?:/i);
- });
- });
-
- describe('Network/API Errors', () => {
- it('should format connection error with retry guidance', () => {
- const error = new Error('Failed to connect to MeMesh server: ECONNREFUSED');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Send task',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should indicate connection issue
- expect(formatted).toMatch(/connection|connect/i);
-
- // Should suggest retry or restart
- expect(formatted).toMatch(/retry|restart|wait/i);
-
- // Should provide troubleshooting steps
- expect(formatted).toMatch(/fix steps?:/i);
- });
-
- it('should format timeout error with clear explanation', () => {
- const error = new Error('Request timeout: Operation took too long');
- const response: AgentResponse = {
- agentType: 'buddy-remember',
- taskDescription: 'Get task status',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should indicate timeout
- expect(formatted).toMatch(/timeout|too long/i);
-
- // Should explain impact
- expect(formatted).toMatch(/root cause/i);
-
- // Should NOT show full stack trace
- expect(formatted).not.toContain('at Timeout');
- });
- });
-
- describe('Authorization Errors', () => {
- it('should format permission error with clear fix steps', () => {
- const error = new Error('Permission denied: insufficient privileges');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Execute task',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should indicate permission issue
- expect(formatted).toMatch(/permission|denied|unauthorized/i);
-
- // Should provide fix steps
- expect(formatted).toMatch(/fix steps?:/i);
- });
- });
-
- describe('Edge Cases', () => {
- it('should handle undefined error gracefully', () => {
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error: undefined as unknown as Error,
- };
-
- const formatted = formatter.format(response);
-
- // Should still show error indicator
- expect(formatted).toContain('❌');
-
- // Should have fallback message
- expect(formatted).toMatch(/error|unexpected/i);
-
- // Should provide general guidance
- expect(formatted).toMatch(/fix steps?|help/i);
- });
-
- it('should handle error with empty message', () => {
- const error = new Error('');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should show error indicator
- expect(formatted).toContain('❌');
-
- // Should have fallback description
- expect(formatted).toMatch(/unexpected|error/i);
- });
-
- it('should NOT show stack trace in normal mode', () => {
- const error = new Error('Test error with stack');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- // Ensure DEBUG is not set
- delete process.env.DEBUG;
-
- const formatted = formatter.format(response);
-
- // Should NOT contain stack trace keywords
- expect(formatted).not.toContain('at Object');
- expect(formatted).not.toContain('at Module');
- expect(formatted).not.toContain('at Function');
- expect(formatted).not.toContain('.ts:');
- expect(formatted).not.toContain('.js:');
- });
-
- it('should show stack trace in debug mode', () => {
- const error = new Error('Test error with stack');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- // Enable DEBUG mode
- process.env.DEBUG = 'true';
-
- const formatted = formatter.format(response);
-
- // Should contain stack trace section
- expect(formatted).toMatch(/stack trace/i);
-
- // Clean up
- delete process.env.DEBUG;
- });
- });
-
- describe('User-First Format Structure', () => {
- it('should follow user-first format pattern', () => {
- const error = new Error('Database connection failed');
- const response: AgentResponse = {
- agentType: 'buddy-remember',
- taskDescription: 'Search memory',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should have clear sections
- expect(formatted).toMatch(/root cause/i); // What happened
- expect(formatted).toMatch(/fix steps?:/i); // How to fix
-
- // Should have help footer
- expect(formatted).toMatch(/need.*help|get.*help/i);
- });
-
- it('should prioritize actionable guidance over technical details', () => {
- const error = new Error('TypeScript compilation error in module X');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Execute task',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should focus on what user can do
- expect(formatted).toMatch(/fix steps?:/i);
-
- // Should not overwhelm with technical jargon
- expect(formatted).not.toContain('TypeError:');
- expect(formatted).not.toContain('ReferenceError:');
-
- // Should provide clear guidance
- expect(formatted).toMatch(/1\./); // Numbered steps
- });
- });
-
- describe('Help References', () => {
- it('should reference buddy-help for command guidance', () => {
- const error = new Error('Invalid command syntax');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should reference help command
- expect(formatted).toMatch(/buddy-help/i);
- });
-
- it('should include relevant documentation links', () => {
- const error = new Error('Configuration error: MCP server misconfigured');
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test',
- status: 'error',
- error,
- };
-
- const formatted = formatter.format(response);
-
- // Should have documentation section or link
- expect(formatted).toMatch(/documentation|docs|guide/i);
- });
- });
-});
diff --git a/tests/ui/ResponseFormatter.test.ts b/tests/ui/ResponseFormatter.test.ts
deleted file mode 100644
index edf3a26d..00000000
--- a/tests/ui/ResponseFormatter.test.ts
+++ /dev/null
@@ -1,716 +0,0 @@
-/**
- * ResponseFormatter Test Suite
- *
- * Comprehensive tests for response formatting with visual hierarchy
- * Covers all complexity levels, section formatters, and edge cases
- */
-
-import { describe, it, expect, beforeEach } from 'vitest';
-import { ResponseFormatter, type AgentResponse } from '../../src/ui/ResponseFormatter.js';
-
-describe('ResponseFormatter', () => {
- let formatter: ResponseFormatter;
-
- beforeEach(() => {
- formatter = new ResponseFormatter();
- });
-
- // ============================================================================
- // Complexity Detection Tests
- // ============================================================================
- describe('Complexity Detection', () => {
- it('should detect simple response (string result, no metadata)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Simple task',
- status: 'success',
- results: 'Simple string result',
- };
-
- const formatted = formatter.format(response);
-
- // Simple format: minimal header with operation name (no boxes, no dividers)
- expect(formatted).toContain('Test Agent'); // Operation name
- expect(formatted).not.toContain('─'.repeat(60)); // No dividers
- expect(formatted).not.toContain('╭'); // No boxes
- });
-
- it('should detect medium response (structured object)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Medium task',
- status: 'success',
- results: {
- key1: 'value1',
- key2: 'value2',
- },
- };
-
- const formatted = formatter.format(response);
-
- // Medium format: minimal header + results, no heavy borders
- expect(formatted).toContain('Test Agent'); // Operation name
- expect(formatted).toContain('key1');
- expect(formatted).not.toContain('╭'); // No boxes
- });
-
- it('should detect complex response (has error)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Error task',
- status: 'error',
- error: new Error('Test error'),
- };
-
- const formatted = formatter.format(response);
-
- // Complex format: minimal header + dividers (no boxes)
- expect(formatted).toContain('Test Agent'); // Operation name
- expect(formatted).toContain('─'.repeat(60)); // Has dividers
- expect(formatted).toContain('Error');
- });
-
- it('should detect complex response (has enhanced prompt)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Enhanced task',
- status: 'success',
- enhancedPrompt: {
- systemPrompt: 'Test system prompt',
- userPrompt: 'Test user prompt',
- },
- };
-
- const formatted = formatter.format(response);
-
- // Complex format: minimal header + dividers (no boxes)
- expect(formatted).toContain('Test Agent'); // Operation name
- expect(formatted).toContain('─'.repeat(60)); // Has dividers
- expect(formatted).toContain('Enhanced Prompt');
- });
-
- it('should detect complex response (large results)', () => {
- const largeResult = 'x'.repeat(600); // > 500 chars threshold
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Large task',
- status: 'success',
- results: largeResult,
- };
-
- const formatted = formatter.format(response);
-
- // Complex format for large results: minimal header (no box)
- expect(formatted).toContain('Test Agent'); // Operation name
- });
- });
-
- // ============================================================================
- // Section Formatter Tests
- // ============================================================================
- describe('Section Formatters', () => {
- describe('formatResults', () => {
- it('should format results with success icon', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: { key: 'value' },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('✓'); // Success icon
- expect(formatted).toContain('Results');
- });
-
- it('should format string results (simple format)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test task',
- status: 'success',
- results: 'Test result string',
- };
-
- const formatted = formatter.format(response);
-
- // Simple format: minimal header with operation name
- expect(formatted).toContain('✓');
- expect(formatted).toContain('Test Agent'); // Operation name
- // String results are treated as simple, so full content not shown
- });
-
- it('should format object results', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: {
- key1: 'value1',
- key2: 123,
- key3: true,
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('key1');
- expect(formatted).toContain('value1');
- expect(formatted).toContain('key2');
- expect(formatted).toContain('123');
- });
-
- it('should format array results (simple format)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test task',
- status: 'success',
- results: ['item1', 'item2', 'item3'],
- };
-
- const formatted = formatter.format(response);
-
- // Simple array is treated as simple format with minimal header
- expect(formatted).toContain('✓');
- expect(formatted).toContain('Test Agent'); // Operation name
- });
- });
-
- describe('formatError', () => {
- it('should format error with ErrorClassifier details', () => {
- const testError = new Error('Test error message');
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'error',
- error: testError,
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('❌'); // Error icon (updated in Task 9)
- expect(formatted).toContain('Root Cause:'); // ErrorClassifier output
- expect(formatted).toContain('Fix Steps:'); // ErrorClassifier output
- // Note: ErrorClassifier may categorize the error, so original message might not appear verbatim
- });
-
- it('should include enhanced error details with ErrorClassifier', () => {
- const testError = new Error('Test error');
- testError.stack = 'Error: Test error\n at test.ts:10:5';
-
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'error',
- error: testError,
- };
-
- const formatted = formatter.format(response);
-
- // Should include ErrorClassifier output
- expect(formatted).toContain('Root Cause:');
- expect(formatted).toContain('Fix Steps:');
- expect(formatted).toContain('Need more help?');
-
- // Stack trace only shown in DEBUG mode
- // (not asserting stack trace presence in normal mode)
- });
- });
-
- describe('formatMetadata', () => {
- it('should format metadata with bullet separators (medium format)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: {
- // Object results trigger medium format
- key: 'value',
- },
- metadata: {
- duration: 2345,
- tokensUsed: 1234,
- model: 'claude-sonnet-4.5',
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Duration');
- expect(formatted).toContain('2.3s'); // Formatted duration
- expect(formatted).toContain('Tokens');
- expect(formatted).toContain('1,234'); // Formatted number
- expect(formatted).toContain('Model');
- expect(formatted).toContain('claude-sonnet-4.5');
- expect(formatted).toContain('•'); // Bullet separator
- });
-
- it('should format duration correctly (medium format)', () => {
- const testCases = [
- { ms: 123, expected: '123ms' },
- { ms: 2345, expected: '2.3s' },
- { ms: 65000, expected: '1m 5s' },
- { ms: 125000, expected: '2m 5s' },
- ];
-
- testCases.forEach(({ ms, expected }) => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: { key: 'value' }, // Object to trigger medium format
- metadata: { duration: ms },
- };
-
- const formatted = formatter.format(response);
- expect(formatted).toContain(expected);
- });
- });
-
- it('should format numbers with commas (medium format)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: { key: 'value' }, // Object to trigger medium format
- metadata: { tokensUsed: 1234567 },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('1,234,567');
- });
- });
- });
-
- // ============================================================================
- // Next Steps Generation Tests
- // ============================================================================
- describe('Next Steps Generation', () => {
- it('should generate next steps for errors', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'error',
- error: new Error('Test error'),
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('💡'); // Next Steps icon
- expect(formatted).toContain('Next Steps');
- expect(formatted).toContain('Review the error message');
- expect(formatted).toContain('Check recent changes');
- });
-
- it('should generate next steps for buddy-do success', () => {
- const response: AgentResponse = {
- agentType: 'buddy-do',
- taskDescription: 'Test task',
- status: 'success',
- results: { message: 'Task completed' },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Next Steps');
- expect(formatted).toContain('Verify the implementation');
- expect(formatted).toContain('Run tests');
- expect(formatted).toContain('buddy-remember');
- });
-
- it('should generate next steps for buddy-remember with no results', () => {
- const response: AgentResponse = {
- agentType: 'buddy-remember',
- taskDescription: 'Search memory',
- status: 'success',
- results: {
- query: 'test query',
- count: 0,
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Next Steps');
- expect(formatted).toContain('Try a broader search term');
- expect(formatted).toContain('buddy-do to create new memories');
- });
-
- it('should generate next steps for buddy-remember with results', () => {
- const response: AgentResponse = {
- agentType: 'buddy-remember',
- taskDescription: 'Search memory',
- status: 'success',
- results: {
- query: 'test query',
- count: 3,
- memories: [{ id: 1, content: 'test' }],
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Next Steps');
- expect(formatted).toContain('Review the memories');
- expect(formatted).toContain('Apply these learnings');
- });
- });
-
- // ============================================================================
- // Enhanced Prompt Tests
- // ============================================================================
- describe('Enhanced Prompt Formatting', () => {
- it('should format enhanced prompt with sections', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- enhancedPrompt: {
- systemPrompt: 'System prompt text',
- userPrompt: 'User prompt text',
- suggestedModel: 'claude-opus-4.5',
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('🚀'); // Enhanced Prompt icon
- expect(formatted).toContain('Enhanced Prompt');
- expect(formatted).toContain('System:');
- expect(formatted).toContain('System prompt text');
- expect(formatted).toContain('User:');
- expect(formatted).toContain('User prompt text');
- expect(formatted).toContain('Suggested Model');
- expect(formatted).toContain('claude-opus-4.5');
- });
-
- it('should format guardrails with warning icon', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- enhancedPrompt: {
- systemPrompt: 'System prompt',
- userPrompt: 'User prompt with guardrails',
- metadata: {
- guardrails: 'CRITICAL: Do not modify API',
- },
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Guardrails');
- expect(formatted).toContain('CRITICAL: Do not modify API');
- });
-
- it('should truncate long prompts', () => {
- const longPrompt = 'x'.repeat(400); // > 300 chars
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- enhancedPrompt: {
- systemPrompt: longPrompt,
- userPrompt: 'short',
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('truncated');
- expect(formatted).toContain('characters');
- });
- });
-
- // ============================================================================
- // Edge Cases Tests
- // ============================================================================
- describe('Edge Cases', () => {
- it('should handle empty results', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: '',
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toBeTruthy();
- expect(formatted.length).toBeGreaterThan(0);
- });
-
- it('should handle undefined results', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toBeTruthy();
- expect(formatted).toContain('Test');
- });
-
- it('should handle null values in objects', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: {
- key1: null,
- key2: undefined,
- key3: 'value',
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toBeTruthy();
- expect(formatted).toContain('key3');
- expect(formatted).toContain('value');
- });
-
- it('should handle Unicode characters (emoji, CJK)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: '測試任務 with emoji 🚀',
- status: 'success',
- results: { message: '結果 with emoji ✅' }, // Object to show results
- };
-
- const formatted = formatter.format(response);
-
- // Operation name shown instead of task description
- expect(formatted).toContain('Test Agent');
- expect(formatted).toContain('結果');
- expect(formatted).toContain('✅');
- });
-
- it('should handle very long task descriptions', () => {
- const longDescription = 'Task '.repeat(100);
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: longDescription,
- status: 'success',
- results: 'test',
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toBeTruthy();
- // Operation name shown instead of task description
- expect(formatted).toContain('Test Agent');
- });
-
- it('should handle missing metadata gracefully', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: 'test',
- metadata: {},
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toBeTruthy();
- // Should not crash, metadata section just won't appear
- });
-
- it('should handle partial metadata (medium format)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: { key: 'value' }, // Object to trigger medium format
- metadata: {
- duration: 123,
- // Missing tokensUsed and model
- },
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Duration');
- expect(formatted).toContain('123ms');
- });
- });
-
- // ============================================================================
- // Visual Hierarchy Tests
- // ============================================================================
- describe('Visual Hierarchy', () => {
- it('should include section dividers for complex responses', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'error',
- error: new Error('Test'),
- };
-
- const formatted = formatter.format(response);
-
- // Should have multiple dividers
- const dividerCount = (formatted.match(/─{60}/g) || []).length;
- expect(dividerCount).toBeGreaterThan(0);
- });
-
- it('should not include dividers for simple responses', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- results: 'Simple result',
- };
-
- const formatted = formatter.format(response);
-
- // Should not have dividers
- expect(formatted).not.toContain('─'.repeat(60));
- });
-
- it('should use minimal header for complex responses (no boxes)', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'success',
- enhancedPrompt: {
- systemPrompt: 'test',
- userPrompt: 'test',
- },
- };
-
- const formatted = formatter.format(response);
-
- // Minimal header - no boxes
- expect(formatted).toContain('Test Agent');
- expect(formatted).not.toContain('╭');
- expect(formatted).not.toContain('╰');
- });
-
- it('should include attribution footer for complex responses', () => {
- const response: AgentResponse = {
- agentType: 'test-agent',
- taskDescription: 'Test',
- status: 'error',
- error: new Error('Test'),
- };
-
- const formatted = formatter.format(response);
-
- expect(formatted).toContain('Powered by');
- expect(formatted).toContain('MeMesh');
- });
- });
-
- // ============================================================================
- // Minimal Design (Design B) Tests
- // ============================================================================
- describe('ResponseFormatter - Minimal Design (Design B)', () => {
- describe('Minimal Header (no boxes)', () => {
- it('should format success with operation name and result summary', () => {
- const response: AgentResponse = {
- agentType: 'memesh-remember',
- taskDescription: 'Search memory for "api design"',
- status: 'success',
- results: {
- count: 3,
- memories: [{}, {}, {}],
- },
- };
-
- const formatted = formatter.format(response);
-
- // Should have operation name (not MEMESH-REMEMBER)
- expect(formatted).toContain('Memory Search');
-
- // Should have result summary (not generic SUCCESS)
- expect(formatted).toContain('Found 3 memories');
-
- // Should NOT have boxes
- expect(formatted).not.toContain('╭');
- expect(formatted).not.toContain('╰');
-
- // Should have minimal divider
- expect(formatted).toContain('─'.repeat(60));
- });
-
- it('should use contextual operation names for all tools', () => {
- const testCases = [
- { agentType: 'memesh-do', expected: 'Task Router' },
- { agentType: 'memesh-help', expected: 'Help Center' },
- { agentType: 'create-entities', expected: 'Knowledge Storage' },
- { agentType: 'memesh-record-mistake', expected: 'Error Recording' },
- ];
-
- testCases.forEach(({ agentType, expected }) => {
- const response: AgentResponse = {
- agentType,
- taskDescription: 'Test',
- status: 'success',
- results: 'Test result',
- };
-
- const formatted = formatter.format(response);
- expect(formatted).toContain(expected);
- });
- });
-
- it('should format contextual success messages', () => {
- const testCases = [
- {
- agentType: 'memesh-remember',
- results: { count: 2, memories: [{}, {}] },
- expected: 'Found 2 memories',
- },
- {
- agentType: 'memesh-do',
- results: { message: 'Task completed' },
- expected: 'Task Router',
- },
- {
- agentType: 'create-entities',
- results: { created: 3 },
- expected: 'Created 3 entities',
- },
- ];
-
- testCases.forEach(({ agentType, results, expected }) => {
- const response: AgentResponse = {
- agentType,
- taskDescription: 'Test',
- status: 'success',
- results,
- };
-
- const formatted = formatter.format(response);
- expect(formatted).toContain(expected);
- });
- });
- });
-
- describe('Backward Compatibility', () => {
- it('should handle buddy-* prefixed tools correctly', () => {
- const response: AgentResponse = {
- agentType: 'buddy-remember',
- taskDescription: 'Search memory',
- status: 'success',
- results: { count: 1, memories: [{}] },
- };
-
- const formatted = formatter.format(response);
-
- // Should display with correct operation name
- expect(formatted).toContain('Memory Search');
- expect(formatted).toContain('Found 1 memory');
-
- // Should NOT show deprecation notice (buddy-* is the primary naming)
- expect(formatted).not.toContain('Deprecation Notice');
- });
- });
- });
-});
diff --git a/tests/ui/UIEventBus.test.ts b/tests/ui/UIEventBus.test.ts
deleted file mode 100644
index 2c70c700..00000000
--- a/tests/ui/UIEventBus.test.ts
+++ /dev/null
@@ -1,266 +0,0 @@
-/**
- * UIEventBus Test Suite
- *
- * Tests for the UIEventBus event system
- */
-
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { UIEventBus } from '../../src/ui/UIEventBus.js';
-import { UIEventType, ProgressIndicator, SuccessEvent, ErrorEvent } from '../../src/ui/types.js';
-
-describe('UIEventBus', () => {
- let eventBus: UIEventBus;
-
- beforeEach(() => {
- // Get a fresh instance for each test
- eventBus = UIEventBus.getInstance();
- // Clear all listeners
- eventBus.removeAllListeners();
- });
-
- describe('Singleton Pattern', () => {
- it('should return the same instance', () => {
- const instance1 = UIEventBus.getInstance();
- const instance2 = UIEventBus.getInstance();
-
- expect(instance1).toBe(instance2);
- });
- });
-
- describe('Event Emission and Subscription', () => {
- it('should emit and receive progress events', () => {
- const mockHandler = vi.fn();
- const progressData: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'code-reviewer',
- taskDescription: 'Reviewing code',
- progress: 0.5,
- currentStage: 'analyzing',
- startTime: new Date(),
- };
-
- eventBus.onProgress(mockHandler);
- eventBus.emitProgress(progressData);
-
- expect(mockHandler).toHaveBeenCalledOnce();
- expect(mockHandler).toHaveBeenCalledWith(progressData);
- });
-
- it('should emit and receive success events', () => {
- const mockHandler = vi.fn();
- const successData: SuccessEvent = {
- agentId: 'test-agent-1',
- agentType: 'test-automator',
- taskDescription: 'Running tests',
- result: { testsPass: true },
- duration: 5000,
- timestamp: new Date(),
- };
-
- eventBus.onSuccess(mockHandler);
- eventBus.emitSuccess(successData);
-
- expect(mockHandler).toHaveBeenCalledOnce();
- expect(mockHandler).toHaveBeenCalledWith(successData);
- });
-
- it('should emit and receive error events', () => {
- const mockHandler = vi.fn();
- const errorData: ErrorEvent = {
- agentId: 'test-agent-2',
- agentType: 'backend-developer',
- taskDescription: 'API implementation',
- error: new Error('Database connection failed'),
- timestamp: new Date(),
- };
-
- eventBus.onError(mockHandler);
- eventBus.emitError(errorData);
-
- expect(mockHandler).toHaveBeenCalledOnce();
- expect(mockHandler).toHaveBeenCalledWith(errorData);
- });
- });
-
- describe('Multiple Listeners', () => {
- it('should support multiple listeners for the same event', () => {
- const handler1 = vi.fn();
- const handler2 = vi.fn();
- const progressData: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'debugger',
- taskDescription: 'Debugging issue',
- progress: 0.3,
- startTime: new Date(),
- };
-
- eventBus.onProgress(handler1);
- eventBus.onProgress(handler2);
- eventBus.emitProgress(progressData);
-
- expect(handler1).toHaveBeenCalledOnce();
- expect(handler2).toHaveBeenCalledOnce();
- });
- });
-
- describe('Unsubscribe Functionality', () => {
- it('should allow unsubscribing from events', () => {
- const mockHandler = vi.fn();
- const progressData: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'frontend-developer',
- taskDescription: 'Building UI',
- progress: 0.5,
- startTime: new Date(),
- };
-
- const unsubscribe = eventBus.onProgress(mockHandler);
- eventBus.emitProgress(progressData);
-
- expect(mockHandler).toHaveBeenCalledOnce();
-
- // Unsubscribe
- unsubscribe();
- eventBus.emitProgress(progressData);
-
- // Should still be called only once (not twice)
- expect(mockHandler).toHaveBeenCalledOnce();
- });
- });
-
- describe('Error Handling', () => {
- it('should wrap handlers with error boundary', () => {
- const faultyHandler = vi.fn(() => {
- throw new Error('Handler error');
- });
- const errorEventHandler = vi.fn();
- const progressData: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'test-automator',
- taskDescription: 'Running tests',
- progress: 0.5,
- startTime: new Date(),
- };
-
- // Listen for error events emitted by error boundary
- eventBus.on('error', errorEventHandler);
-
- // Subscribe faulty handler
- eventBus.onProgress(faultyHandler);
-
- // This should not throw - error should be caught and emitted as error event
- expect(() => eventBus.emitProgress(progressData)).not.toThrow();
-
- // Faulty handler was called
- expect(faultyHandler).toHaveBeenCalledOnce();
-
- // Error event should have been emitted
- expect(errorEventHandler).toHaveBeenCalledOnce();
- const errorEvent = errorEventHandler.mock.calls[0][0];
- expect(errorEvent.error).toBeInstanceOf(Error);
- expect(errorEvent.error.message).toBe('Handler error');
- });
-
- it('should continue processing other handlers after one fails', () => {
- const faultyHandler = vi.fn(() => {
- throw new Error('Handler error');
- });
- const goodHandler = vi.fn();
- const progressData: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'code-reviewer',
- taskDescription: 'Reviewing code',
- progress: 0.5,
- startTime: new Date(),
- };
-
- eventBus.onProgress(faultyHandler);
- eventBus.onProgress(goodHandler);
-
- eventBus.emitProgress(progressData);
-
- expect(faultyHandler).toHaveBeenCalledOnce();
- expect(goodHandler).toHaveBeenCalledOnce();
- });
- });
-
- describe('Event Type Support', () => {
- it('should support all UIEventType events', () => {
- const handlers = {
- progress: vi.fn(),
- agent_start: vi.fn(),
- agent_complete: vi.fn(),
- success: vi.fn(),
- error: vi.fn(),
- metrics_update: vi.fn(),
- };
-
- // Subscribe to all events
- eventBus.on(UIEventType.PROGRESS, handlers.progress);
- eventBus.on(UIEventType.AGENT_START, handlers.agent_start);
- eventBus.on(UIEventType.AGENT_COMPLETE, handlers.agent_complete);
- eventBus.on(UIEventType.SUCCESS, handlers.success);
- eventBus.on(UIEventType.ERROR, handlers.error);
- eventBus.on(UIEventType.METRICS_UPDATE, handlers.metrics_update);
-
- // Emit all events
- eventBus.emit(UIEventType.PROGRESS, { test: 'data' });
- eventBus.emit(UIEventType.AGENT_START, { test: 'data' });
- eventBus.emit(UIEventType.AGENT_COMPLETE, { test: 'data' });
- eventBus.emit(UIEventType.SUCCESS, { test: 'data' });
- eventBus.emit(UIEventType.ERROR, { test: 'data' });
- eventBus.emit(UIEventType.METRICS_UPDATE, { test: 'data' });
-
- // All handlers should have been called
- expect(handlers.progress).toHaveBeenCalledOnce();
- expect(handlers.agent_start).toHaveBeenCalledOnce();
- expect(handlers.agent_complete).toHaveBeenCalledOnce();
- expect(handlers.success).toHaveBeenCalledOnce();
- expect(handlers.error).toHaveBeenCalledOnce();
- expect(handlers.metrics_update).toHaveBeenCalledOnce();
- });
- });
-
- describe('Memory Management', () => {
- it('should clean up all listeners with removeAllListeners', () => {
- const handler1 = vi.fn();
- const handler2 = vi.fn();
- const handler3 = vi.fn();
-
- eventBus.onProgress(handler1);
- eventBus.onSuccess(handler2);
- eventBus.onError(handler3);
-
- eventBus.removeAllListeners();
-
- // Emit events after cleanup
- eventBus.emitProgress({
- agentId: 'test',
- agentType: 'test',
- taskDescription: 'test',
- progress: 0.5,
- startTime: new Date(),
- });
- eventBus.emitSuccess({
- agentId: 'test',
- agentType: 'test',
- taskDescription: 'test',
- result: {},
- duration: 100,
- timestamp: new Date(),
- });
- eventBus.emitError({
- agentId: 'test',
- agentType: 'test',
- taskDescription: 'test',
- error: new Error('test'),
- timestamp: new Date(),
- });
-
- // No handlers should be called
- expect(handler1).not.toHaveBeenCalled();
- expect(handler2).not.toHaveBeenCalled();
- expect(handler3).not.toHaveBeenCalled();
- });
- });
-});
diff --git a/tests/ui/design-tokens.test.ts b/tests/ui/design-tokens.test.ts
deleted file mode 100644
index a2b90a47..00000000
--- a/tests/ui/design-tokens.test.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import {
- operationDisplayNames,
- operationIcons,
- getOperationDisplayName,
- getOperationIcon,
-} from '../../src/ui/design-tokens';
-
-describe('Design Tokens', () => {
- describe('Operation Display Names', () => {
- it('should provide friendly names for all operations', () => {
- expect(operationDisplayNames['memesh-remember']).toBe('Memory Search');
- expect(operationDisplayNames['memesh-do']).toBe('Task Router');
- expect(operationDisplayNames['memesh-help']).toBe('Help Center');
- expect(operationDisplayNames['create-entities']).toBe('Knowledge Storage');
- });
-
- it('should handle unknown operations gracefully', () => {
- expect(operationDisplayNames['unknown-op']).toBeUndefined();
- });
- });
-
- describe('Operation Icons', () => {
- it('should provide icons for all operations', () => {
- expect(operationIcons.memory).toBe('🧠');
- expect(operationIcons.task).toBe('📋');
- expect(operationIcons.help).toBe('💡');
- expect(operationIcons.agent).toBe('🤖');
- });
- });
-
- describe('getOperationDisplayName', () => {
- it('should return display name for known operations', () => {
- expect(getOperationDisplayName('memesh-remember')).toBe('Memory Search');
- expect(getOperationDisplayName('memesh-do')).toBe('Task Router');
- });
-
- it('should format unknown operations', () => {
- expect(getOperationDisplayName('unknown-operation')).toBe('Unknown Operation');
- expect(getOperationDisplayName('memesh-test-thing')).toBe('Test Thing');
- });
- });
-
- describe('getOperationIcon', () => {
- it('should return correct icon for operations', () => {
- expect(getOperationIcon('memesh-remember')).toBe('🧠');
- expect(getOperationIcon('task-something')).toBe('📋');
- expect(getOperationIcon('help-me')).toBe('💡');
- });
-
- it('should return empty string for unknown operations', () => {
- expect(getOperationIcon('completely-unknown')).toBe('');
- });
- });
-});
diff --git a/tests/ui/types.test.ts b/tests/ui/types.test.ts
deleted file mode 100644
index 1b02eb97..00000000
--- a/tests/ui/types.test.ts
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * UI Types Test Suite
- *
- * Tests for Terminal UI type definitions
- */
-
-import { describe, it, expect } from 'vitest';
-import {
- UIEventType,
- ProgressIndicator,
- AgentStatus,
- SuccessEvent,
- ErrorEvent,
- AttributionEntry,
- MetricsSnapshot,
- DashboardState,
- DashboardConfig,
-} from '../../src/ui/types.js';
-
-describe('UI Types', () => {
- describe('UIEventType', () => {
- it('should have all required event types', () => {
- expect(UIEventType.PROGRESS).toBe('progress');
- expect(UIEventType.AGENT_START).toBe('agent_start');
- expect(UIEventType.AGENT_COMPLETE).toBe('agent_complete');
- expect(UIEventType.SUCCESS).toBe('success');
- expect(UIEventType.ERROR).toBe('error');
- expect(UIEventType.METRICS_UPDATE).toBe('metrics_update');
- });
- });
-
- describe('ProgressIndicator', () => {
- it('should create valid progress indicator', () => {
- const progress: ProgressIndicator = {
- agentId: 'test-agent-1',
- agentType: 'code-reviewer',
- taskDescription: 'Reviewing code',
- progress: 0.5,
- currentStage: 'analyzing',
- startTime: new Date(),
- };
-
- expect(progress.agentId).toBe('test-agent-1');
- expect(progress.progress).toBe(0.5);
- expect(progress.currentStage).toBe('analyzing');
- });
-
- it('should allow optional endTime', () => {
- const progress: ProgressIndicator = {
- agentId: 'test-agent-2',
- agentType: 'debugger',
- taskDescription: 'Debugging issue',
- progress: 1.0,
- currentStage: 'completed',
- startTime: new Date(),
- endTime: new Date(),
- };
-
- expect(progress.endTime).toBeDefined();
- });
- });
-
- describe('AgentStatus', () => {
- it('should create valid agent status', () => {
- const status: AgentStatus = {
- agentId: 'agent-1',
- agentType: 'frontend-developer',
- status: 'running',
- progress: 0.3,
- currentTask: 'Building component',
- startTime: new Date(),
- };
-
- expect(status.status).toBe('running');
- expect(status.progress).toBe(0.3);
- });
- });
-
- describe('SuccessEvent', () => {
- it('should create valid success event', () => {
- const success: SuccessEvent = {
- agentId: 'agent-1',
- agentType: 'test-automator',
- taskDescription: 'Running tests',
- result: { testsPass: true, coverage: 95 },
- duration: 5000,
- timestamp: new Date(),
- };
-
- expect(success.result.testsPass).toBe(true);
- expect(success.duration).toBe(5000);
- });
- });
-
- describe('ErrorEvent', () => {
- it('should create valid error event', () => {
- const error: ErrorEvent = {
- agentId: 'agent-2',
- agentType: 'backend-developer',
- taskDescription: 'API implementation',
- error: new Error('Database connection failed'),
- timestamp: new Date(),
- };
-
- expect(error.error).toBeInstanceOf(Error);
- expect(error.error.message).toBe('Database connection failed');
- });
- });
-
- describe('AttributionEntry', () => {
- it('should create valid attribution entry for success', () => {
- const entry: AttributionEntry = {
- type: 'success',
- agentType: 'code-reviewer',
- taskDescription: 'Code review completed',
- timestamp: new Date(),
- result: { issues: 0 },
- };
-
- expect(entry.type).toBe('success');
- expect(entry.result).toBeDefined();
- });
-
- it('should create valid attribution entry for error', () => {
- const entry: AttributionEntry = {
- type: 'error',
- agentType: 'deployment-engineer',
- taskDescription: 'Deployment failed',
- timestamp: new Date(),
- error: new Error('Connection timeout'),
- };
-
- expect(entry.type).toBe('error');
- expect(entry.error).toBeInstanceOf(Error);
- });
- });
-
- describe('MetricsSnapshot', () => {
- it('should create valid metrics snapshot', () => {
- const metrics: MetricsSnapshot = {
- sessionStart: new Date(),
- totalTasks: 10,
- completedTasks: 7,
- failedTasks: 1,
- agentUsageCount: {
- 'code-reviewer': 3,
- 'debugger': 2,
- 'test-automator': 2,
- },
- estimatedTimeSaved: 3600,
- tokensUsed: 50000,
- };
-
- expect(metrics.totalTasks).toBe(10);
- expect(metrics.completedTasks).toBe(7);
- expect(metrics.agentUsageCount['code-reviewer']).toBe(3);
- });
- });
-
- describe('DashboardState', () => {
- it('should create valid dashboard state', () => {
- const state: DashboardState = {
- activeAgents: new Map([
- ['agent-1', {
- agentId: 'agent-1',
- agentType: 'frontend-developer',
- status: 'running',
- progress: 0.5,
- currentTask: 'Building UI',
- startTime: new Date(),
- }],
- ]),
- recentEvents: [],
- metrics: {
- sessionStart: new Date(),
- totalTasks: 5,
- completedTasks: 2,
- failedTasks: 0,
- agentUsageCount: { 'frontend-developer': 1 },
- estimatedTimeSaved: 1800,
- tokensUsed: 25000,
- },
- };
-
- expect(state.activeAgents.size).toBe(1);
- expect(state.metrics.totalTasks).toBe(5);
- });
- });
-
- describe('DashboardConfig', () => {
- it('should create valid dashboard config', () => {
- const config: DashboardConfig = {
- updateInterval: 100,
- maxRecentEvents: 10,
- showSpinner: true,
- showMetrics: true,
- showAttribution: true,
- };
-
- expect(config.updateInterval).toBe(100);
- expect(config.maxRecentEvents).toBe(10);
- expect(config.showSpinner).toBe(true);
- });
-
- it('should use default values when not specified', () => {
- const config: DashboardConfig = {
- updateInterval: 100,
- maxRecentEvents: 10,
- };
-
- // TypeScript should allow optional fields
- expect(config.updateInterval).toBe(100);
- });
- });
-});
diff --git a/tests/unit/ClaudeMdReloader.test.ts b/tests/unit/ClaudeMdReloader.test.ts
deleted file mode 100644
index d43723fd..00000000
--- a/tests/unit/ClaudeMdReloader.test.ts
+++ /dev/null
@@ -1,192 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { ClaudeMdReloader } from '../../src/mcp/ClaudeMdReloader.js';
-
-describe('ClaudeMdReloader', () => {
- let reloader: ClaudeMdReloader;
-
- beforeEach(() => {
- reloader = new ClaudeMdReloader();
- });
-
- it('should generate MCP resource update request', () => {
- const resourceUpdate = reloader.generateReloadRequest();
-
- expect(resourceUpdate).toMatchObject({
- method: 'resources/updated',
- params: {
- uri: expect.stringContaining('CLAUDE.md'),
- },
- });
- });
-
- it('should track reload history', () => {
- reloader.recordReload({ reason: 'token-threshold', triggeredBy: 'auto' });
- reloader.recordReload({ reason: 'manual', triggeredBy: 'user' });
-
- const history = reloader.getReloadHistory();
-
- expect(history).toHaveLength(2);
- expect(history[0]).toMatchObject({
- reason: 'token-threshold',
- triggeredBy: 'auto',
- });
- });
-
- it('should prevent reload spam (cooldown period)', () => {
- const first = reloader.canReload();
- expect(first).toBe(true);
-
- reloader.recordReload({ reason: 'test', triggeredBy: 'auto' });
-
- const second = reloader.canReload();
- expect(second).toBe(false); // Too soon after last reload
- });
-
- it('should allow reload after cooldown period', () => {
- vi.useFakeTimers();
-
- reloader.recordReload({ reason: 'test', triggeredBy: 'auto' });
- expect(reloader.canReload()).toBe(false);
-
- // Advance time past cooldown (5 minutes default)
- vi.advanceTimersByTime(6 * 60 * 1000);
-
- expect(reloader.canReload()).toBe(true);
-
- vi.useRealTimers();
- });
-
- // CRITICAL ISSUE 1: Constructor validation
- describe('Constructor validation', () => {
- it('should reject negative cooldown', () => {
- expect(() => new ClaudeMdReloader(-1000)).toThrow(
- 'cooldownMs must be positive'
- );
- });
-
- it('should reject zero cooldown', () => {
- expect(() => new ClaudeMdReloader(0)).toThrow(
- 'cooldownMs must be positive'
- );
- });
- });
-
- // CRITICAL ISSUE 2: recordReload validation
- describe('recordReload validation', () => {
- it('should reject invalid reload records - missing reason', () => {
- expect(() =>
- reloader.recordReload({ reason: '', triggeredBy: 'auto' } as any)
- ).toThrow('reason and triggeredBy are required');
- });
-
- it('should reject invalid reload records - missing triggeredBy', () => {
- expect(() =>
- reloader.recordReload({ reason: 'manual', triggeredBy: '' } as any)
- ).toThrow('reason and triggeredBy are required');
- });
- });
-
- // IMPORTANT ISSUE 5: History overflow edge case
- describe('History overflow', () => {
- it('should limit history to 50 records', () => {
- // Add 60 records
- for (let i = 0; i < 60; i++) {
- reloader.recordReload({
- reason: 'manual',
- triggeredBy: 'auto',
- metadata: { index: i },
- });
- }
-
- const history = reloader.getReloadHistory();
- expect(history).toHaveLength(50);
-
- // Verify oldest records were removed (FIFO)
- expect(history[0].metadata).toEqual({ index: 10 });
- });
- });
-
- // IMPORTANT ISSUE 6: Race condition documentation
- describe('Rapid sequential reloads', () => {
- it('should handle rapid sequential reloads', () => {
- // Simulate rapid-fire reloads
- for (let i = 0; i < 100; i++) {
- reloader.recordReload({ reason: 'manual', triggeredBy: 'auto' });
- }
-
- const history = reloader.getReloadHistory();
- expect(history.length).toBeLessThanOrEqual(50);
- });
- });
-
- // PRIORITY 1 FIX: Concurrency test
- describe('Concurrent reload handling', () => {
- it('should handle concurrent reloads without race conditions', () => {
- const concurrentReloadCount = 20;
- const records = Array.from({ length: concurrentReloadCount }, (_, i) => ({
- reason: 'token-threshold' as const,
- triggeredBy: 'auto' as const,
- metadata: { index: i },
- }));
-
- // Simulate concurrent calls (synchronous context)
- records.forEach((record) => reloader.recordReload(record));
-
- const history = reloader.getReloadHistory();
- const stats = reloader.getStats();
-
- // Verify all records were processed
- expect(history).toHaveLength(concurrentReloadCount);
-
- // Verify lastReloadTime is the last record's timestamp
- expect(stats.lastReloadTime).toBeDefined();
- const lastRecord = history[history.length - 1];
- expect(stats.lastReloadTime?.getTime()).toBe(lastRecord.timestamp?.getTime());
-
- // Verify no duplicates (all metadata indices unique)
- const indices = history.map((r) => r.metadata?.index);
- const uniqueIndices = new Set(indices);
- expect(uniqueIndices.size).toBe(concurrentReloadCount);
-
- // Verify correct ordering (indices 0-19 in order)
- for (let i = 0; i < concurrentReloadCount; i++) {
- expect(history[i].metadata?.index).toBe(i);
- }
- });
-
- it('should prevent cooldown bypass under concurrent load', () => {
- const reloaderWithShortCooldown = new ClaudeMdReloader(1000); // 1 second cooldown
-
- // First reload should succeed
- reloaderWithShortCooldown.recordReload({
- reason: 'manual',
- triggeredBy: 'user',
- });
-
- const initialHistory = reloaderWithShortCooldown.getReloadHistory();
- expect(initialHistory).toHaveLength(1);
- expect(reloaderWithShortCooldown.canReload()).toBe(false); // In cooldown
-
- // Simulate concurrent reload attempts during cooldown
- const concurrentAttempts = 10;
- for (let i = 0; i < concurrentAttempts; i++) {
- // canReload() should still be false, but we're testing recordReload mutex
- reloaderWithShortCooldown.recordReload({
- reason: 'token-threshold',
- triggeredBy: 'auto',
- metadata: { attempt: i },
- });
- }
-
- const finalHistory = reloaderWithShortCooldown.getReloadHistory();
-
- // All reloads should be recorded (mutex ensures integrity)
- expect(finalHistory).toHaveLength(1 + concurrentAttempts);
-
- // lastReloadTime should be the last record's timestamp (no race condition)
- const stats = reloaderWithShortCooldown.getStats();
- const lastRecord = finalHistory[finalHistory.length - 1];
- expect(stats.lastReloadTime?.getTime()).toBe(lastRecord.timestamp?.getTime());
- });
- });
-});
diff --git a/tests/unit/MistakePatternManager.test.ts b/tests/unit/MistakePatternManager.test.ts
deleted file mode 100644
index c0882ddf..00000000
--- a/tests/unit/MistakePatternManager.test.ts
+++ /dev/null
@@ -1,417 +0,0 @@
-// tests/unit/MistakePatternManager.test.ts
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { MistakePatternManager } from '../../src/core/MistakePatternManager.js';
-import type { UnifiedMemoryStore } from '../../src/memory/UnifiedMemoryStore.js';
-import type { UnifiedMemory } from '../../src/memory/types/unified-memory.js';
-
-describe('MistakePatternManager', () => {
- let manager: MistakePatternManager;
- let mockMemoryStore: UnifiedMemoryStore;
-
- beforeEach(() => {
- mockMemoryStore = {
- searchByType: vi.fn().mockResolvedValue([]),
- } as unknown as UnifiedMemoryStore;
-
- manager = new MistakePatternManager(mockMemoryStore);
- });
-
- describe('Basic pattern extraction', () => {
- it('should extract patterns from mistakes', async () => {
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Failed to run tests before commit',
- importance: 0.8,
- timestamp: new Date('2025-01-01'),
- tags: ['test', 'commit'],
- metadata: {
- errorType: 'workflow-violation',
- phase: 'commit-ready',
- },
- },
- {
- id: '2',
- type: 'mistake',
- content: 'Failed to run tests before commit',
- importance: 0.9,
- timestamp: new Date('2025-01-02'),
- tags: ['test', 'commit'],
- metadata: {
- errorType: 'workflow-violation',
- phase: 'commit-ready',
- },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns('commit-ready');
-
- expect(patterns.length).toBeGreaterThan(0);
- // Should group similar mistakes together
- expect(patterns[0].occurrenceCount).toBeGreaterThanOrEqual(1);
- });
-
- it('should calculate pattern weight correctly', async () => {
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- expect(patterns[0].weight).toBeGreaterThan(0);
- expect(patterns[0].weight).toBeLessThanOrEqual(1);
- });
- });
-
- // CRITICAL-2: Future timestamp and invalid decayRate tests
- describe('CRITICAL-2: Future timestamp and decayRate validation', () => {
- it('should handle future timestamps gracefully', async () => {
- const futureDate = new Date();
- futureDate.setFullYear(futureDate.getFullYear() + 1); // 1 year in future
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: futureDate, // Future timestamp
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- // Should not crash, weight should be 0 for future timestamps
- expect(patterns[0].weight).toBe(0);
- });
-
- it('should handle NaN decayRate gracefully', async () => {
- const managerWithBadConfig = new MistakePatternManager(mockMemoryStore, {
- decayRate: NaN,
- });
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await managerWithBadConfig.extractPatterns();
-
- // Should not crash, falls back to default decayRate 0.01
- expect(patterns[0].weight).toBeGreaterThan(0);
- expect(patterns[0].weight).toBeLessThanOrEqual(1);
- });
-
- it('should handle Infinity decayRate gracefully', async () => {
- const managerWithBadConfig = new MistakePatternManager(mockMemoryStore, {
- decayRate: Infinity,
- });
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await managerWithBadConfig.extractPatterns();
-
- // Should not crash
- expect(patterns[0].weight).toBe(0.5);
- });
-
- it('should handle zero decayRate gracefully', async () => {
- const managerWithBadConfig = new MistakePatternManager(mockMemoryStore, {
- decayRate: 0,
- });
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await managerWithBadConfig.extractPatterns();
-
- // Should not crash, falls back to default decayRate 0.01
- expect(patterns[0].weight).toBeGreaterThan(0);
- expect(patterns[0].weight).toBeLessThanOrEqual(1);
- });
-
- it('should handle negative decayRate gracefully', async () => {
- const managerWithBadConfig = new MistakePatternManager(mockMemoryStore, {
- decayRate: -0.1,
- });
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await managerWithBadConfig.extractPatterns();
-
- // Should not crash
- expect(patterns[0].weight).toBe(0.5);
- });
-
- it('should handle very old timestamps correctly', async () => {
- const veryOldDate = new Date('2020-01-01'); // 5+ years ago
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: veryOldDate,
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- // Weight should be very small due to decay, but not zero
- expect(patterns[0].weight).toBeGreaterThan(0);
- expect(patterns[0].weight).toBeLessThan(0.5);
- });
-
- it('should handle recent timestamps correctly', async () => {
- const recentDate = new Date(); // Now
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: recentDate,
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- // Recent mistakes should have higher weight
- expect(patterns[0].weight).toBeGreaterThan(0.5);
- });
- });
-
- // CRITICAL-3: Empty array division by zero test
- describe('CRITICAL-3: Empty array division protection', () => {
- it('should handle empty mistakes array gracefully', async () => {
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue([]);
-
- const patterns = await manager.extractPatterns();
-
- expect(patterns).toEqual([]);
- });
-
- it('should handle single mistake correctly', async () => {
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- expect(patterns.length).toBe(1);
- expect(patterns[0].baseImportance).toBe(0.8);
- expect(patterns[0].occurrenceCount).toBe(1);
- });
-
- it('should calculate average importance correctly for multiple mistakes', async () => {
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Test mistake 1',
- importance: 0.6,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- {
- id: '2',
- type: 'mistake',
- content: 'Test mistake 2',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- {
- id: '3',
- type: 'mistake',
- content: 'Test mistake 3',
- importance: 1.0,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'test-error' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await manager.extractPatterns();
-
- // Average of 0.6, 0.8, 1.0 = 0.8
- expect(patterns[0].baseImportance).toBeCloseTo(0.8, 2);
- });
-
- it('should handle minOccurrences filter correctly', async () => {
- const managerWithMinOccurrences = new MistakePatternManager(mockMemoryStore, {
- minOccurrences: 2,
- });
-
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'Single occurrence mistake',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'unique-error' },
- },
- {
- id: '2',
- type: 'mistake',
- content: 'Repeated mistake first',
- importance: 0.8,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'repeated-error', phase: 'test' },
- },
- {
- id: '3',
- type: 'mistake',
- content: 'Repeated mistake first',
- importance: 0.9,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'repeated-error', phase: 'test' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const patterns = await managerWithMinOccurrences.extractPatterns();
-
- // Should filter out patterns with < minOccurrences
- // Pattern grouping is by signature (errorType + phase + content words)
- // All patterns should have occurrenceCount >= minOccurrences
- patterns.forEach(pattern => {
- expect(pattern.occurrenceCount).toBeGreaterThanOrEqual(2);
- });
- });
- });
-
- describe('Top patterns functionality', () => {
- it('should return top N patterns by weight', async () => {
- const mockMistakes: UnifiedMemory[] = [
- {
- id: '1',
- type: 'mistake',
- content: 'High importance mistake',
- importance: 0.9,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'error-1' },
- },
- {
- id: '2',
- type: 'mistake',
- content: 'Medium importance mistake',
- importance: 0.6,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'error-2' },
- },
- {
- id: '3',
- type: 'mistake',
- content: 'Low importance mistake',
- importance: 0.3,
- timestamp: new Date(),
- tags: [],
- metadata: { errorType: 'error-3' },
- },
- ];
-
- vi.mocked(mockMemoryStore.searchByType).mockResolvedValue(mockMistakes);
-
- const topPatterns = await manager.getTopPatterns(undefined, 2);
-
- expect(topPatterns.length).toBeLessThanOrEqual(2);
- // Should be sorted by weight descending
- if (topPatterns.length === 2) {
- expect(topPatterns[0].weight).toBeGreaterThanOrEqual(topPatterns[1].weight);
- }
- });
- });
-});
diff --git a/tests/unit/PathResolver.test.ts b/tests/unit/PathResolver.test.ts
deleted file mode 100644
index d54d7445..00000000
--- a/tests/unit/PathResolver.test.ts
+++ /dev/null
@@ -1,696 +0,0 @@
-/**
- * PathResolver Tests
- *
- * Comprehensive tests for PathResolver utility that handles backward
- * compatibility during migration from "Claude Code Buddy" to "MeMesh".
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import fs from 'fs';
-import os from 'os';
-import path from 'path';
-import {
- getDataDirectory,
- getDataPath,
- isMigrationNeeded,
- getMigrationInfo,
- _clearCache,
-} from '../../src/utils/PathResolver.js';
-import { logger } from '../../src/utils/logger.js';
-
-// Mock dependencies
-vi.mock('../../src/utils/logger.js', () => ({
- logger: {
- warn: vi.fn(),
- info: vi.fn(),
- error: vi.fn(),
- },
-}));
-
-describe('PathResolver', () => {
- // Note: We use testDir-based paths for isolated testing, not the actual home directory paths
- // These would be: path.join(os.homedir(), '.memesh') and path.join(os.homedir(), '.claude-code-buddy')
- const testDir = path.join(os.tmpdir(), `pathresolver-test-${Date.now()}`);
- const testNewDir = path.join(testDir, '.memesh');
- const testLegacyDir = path.join(testDir, '.claude-code-buddy');
-
- // Helper to mock os.homedir() to point to test directory
- const mockHomedir = (dir: string) => {
- vi.spyOn(os, 'homedir').mockReturnValue(dir);
- };
-
- // Helper to create directories
- const createDir = (dir: string) => {
- if (!fs.existsSync(dir)) {
- fs.mkdirSync(dir, { recursive: true });
- }
- };
-
- // Helper to remove directories
- const removeDir = (dir: string) => {
- if (fs.existsSync(dir)) {
- fs.rmSync(dir, { recursive: true, force: true });
- }
- };
-
- beforeEach(() => {
- // Clear cache before each test
- _clearCache();
- // Clear all mocks
- vi.clearAllMocks();
- // Create test directory structure
- createDir(testDir);
- });
-
- afterEach(() => {
- // Clean up test directories
- removeDir(testDir);
- // Restore mocks
- vi.restoreAllMocks();
- });
-
- describe('getDataDirectory()', () => {
- describe('Case 1: New directory exists', () => {
- it('should return new directory when it exists', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataDirectory();
-
- expect(result).toBe(testNewDir);
- expect(logger.warn).not.toHaveBeenCalled();
- });
-
- it('should prefer new directory even if legacy exists', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
- createDir(testLegacyDir);
-
- const result = getDataDirectory();
-
- expect(result).toBe(testNewDir);
- expect(logger.warn).not.toHaveBeenCalled();
- });
-
- it('should cache result after first call', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result1 = getDataDirectory();
- const result2 = getDataDirectory();
- const result3 = getDataDirectory();
-
- expect(result1).toBe(testNewDir);
- expect(result2).toBe(testNewDir);
- expect(result3).toBe(testNewDir);
-
- // fs.existsSync should be called only during first call
- // (we can't directly test this, but we verify consistent behavior)
- });
- });
-
- describe('Case 2: Only legacy directory exists', () => {
- it('should return legacy directory with warning', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- const result = getDataDirectory();
-
- expect(result).toBe(testLegacyDir);
- expect(logger.warn).toHaveBeenCalled();
- });
-
- it('should show migration warning with expected content', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- getDataDirectory();
-
- // Verify warning messages
- const warnCalls = (logger.warn as any).mock.calls.flat();
- const warningText = warnCalls.join(' ');
-
- expect(warningText).toContain('MIGRATION NOTICE');
- expect(warningText).toContain('Found legacy data directory');
- expect(warningText).toContain('New directory should be');
- expect(warningText).toContain('MeMesh is using your existing data');
- expect(warningText).toContain('./scripts/migrate-from-ccb.sh');
- expect(warningText).toContain('This warning appears once per session');
- });
-
- it('should show warning only once per session', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- getDataDirectory();
- getDataDirectory();
- getDataDirectory();
-
- // Warning should be logged multiple times (for the multi-line message)
- // but only during the first call
- const firstCallWarnings = (logger.warn as any).mock.calls.length;
- expect(firstCallWarnings).toBeGreaterThan(0);
-
- // Clear mocks and call again
- vi.clearAllMocks();
- getDataDirectory();
-
- // No new warnings should be logged
- expect(logger.warn).not.toHaveBeenCalled();
- });
-
- it('should cache legacy directory path', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- const result1 = getDataDirectory();
- const result2 = getDataDirectory();
-
- expect(result1).toBe(testLegacyDir);
- expect(result2).toBe(testLegacyDir);
- });
- });
-
- describe('Case 3: Neither directory exists', () => {
- it('should create and return new directory', () => {
- mockHomedir(testDir);
-
- const result = getDataDirectory();
-
- expect(result).toBe(testNewDir);
- expect(fs.existsSync(testNewDir)).toBe(true);
- expect(logger.info).toHaveBeenCalledWith(expect.stringContaining('Created data directory'));
- });
-
- it('should cache newly created directory', () => {
- mockHomedir(testDir);
-
- const result1 = getDataDirectory();
- const result2 = getDataDirectory();
-
- expect(result1).toBe(testNewDir);
- expect(result2).toBe(testNewDir);
- // Info should only be logged once
- expect(logger.info).toHaveBeenCalledTimes(1);
- });
-
- it('should create nested directories if needed', () => {
- const deepTestDir = path.join(testDir, 'nested', 'deep', 'path');
- mockHomedir(deepTestDir);
-
- const result = getDataDirectory();
- const expectedDir = path.join(deepTestDir, '.memesh');
-
- expect(result).toBe(expectedDir);
- expect(fs.existsSync(expectedDir)).toBe(true);
- });
- });
-
- describe('Error handling', () => {
- it('should throw error when directory creation fails', () => {
- mockHomedir(testDir);
-
- // Mock mkdirSync to throw an error
- const mkdirSyncSpy = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => {
- throw new Error('Permission denied');
- });
-
- expect(() => getDataDirectory()).toThrow('Failed to create data directory');
- expect(logger.error).toHaveBeenCalled();
-
- mkdirSyncSpy.mockRestore();
- });
-
- it('should include original error in thrown error', () => {
- mockHomedir(testDir);
-
- const originalError = new Error('EACCES: permission denied');
- const mkdirSyncSpy = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => {
- throw originalError;
- });
-
- expect(() => getDataDirectory()).toThrow('Failed to create data directory');
-
- mkdirSyncSpy.mockRestore();
- });
- });
-
- describe('Cache behavior', () => {
- it('should return cached value on subsequent calls', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const existsSpy = vi.spyOn(fs, 'existsSync');
-
- const result1 = getDataDirectory();
- const callsAfterFirst = existsSpy.mock.calls.length;
-
- const result2 = getDataDirectory();
- const callsAfterSecond = existsSpy.mock.calls.length;
-
- expect(result1).toBe(result2);
- expect(callsAfterSecond).toBe(callsAfterFirst); // No additional fs calls
-
- existsSpy.mockRestore();
- });
-
- it('should reset cache with _clearCache()', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- getDataDirectory();
- _clearCache();
-
- // After clearing cache, should evaluate again
- const result = getDataDirectory();
- expect(result).toBe(testNewDir);
- });
-
- it('should reset migration warning flag with _clearCache()', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- getDataDirectory();
- expect(logger.warn).toHaveBeenCalled();
-
- vi.clearAllMocks();
- getDataDirectory();
- expect(logger.warn).not.toHaveBeenCalled(); // Warning not shown again
-
- _clearCache();
- vi.clearAllMocks();
- getDataDirectory();
- expect(logger.warn).toHaveBeenCalled(); // Warning shown after cache clear
- });
- });
- });
-
- describe('getDataPath()', () => {
- it('should return correct path for simple filename', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataPath('test.db');
-
- expect(result).toBe(path.join(testNewDir, 'test.db'));
- });
-
- it('should handle nested paths', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataPath('subdir/file.db');
-
- expect(result).toBe(path.join(testNewDir, 'subdir', 'file.db'));
- });
-
- it('should handle deep nested paths', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataPath('deep/nested/subdir/file.json');
-
- expect(result).toBe(path.join(testNewDir, 'deep', 'nested', 'subdir', 'file.json'));
- });
-
- it('should use cached data directory', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const existsSpy = vi.spyOn(fs, 'existsSync');
-
- // First call caches directory
- getDataPath('file1.db');
- const callsAfterFirst = existsSpy.mock.calls.length;
-
- // Subsequent calls use cache
- getDataPath('file2.db');
- const callsAfterSecond = existsSpy.mock.calls.length;
-
- expect(callsAfterSecond).toBe(callsAfterFirst);
-
- existsSpy.mockRestore();
- });
-
- it('should work with various file extensions', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const extensions = ['.db', '.json', '.txt', '.log', '.sqlite'];
- extensions.forEach((ext) => {
- const result = getDataPath(`file${ext}`);
- expect(result).toBe(path.join(testNewDir, `file${ext}`));
- });
- });
-
- it('should handle files without extensions', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataPath('lockfile');
-
- expect(result).toBe(path.join(testNewDir, 'lockfile'));
- });
-
- it('should handle empty filename', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = getDataPath('');
-
- expect(result).toBe(testNewDir);
- });
- });
-
- describe('isMigrationNeeded()', () => {
- it('should return true when legacy exists but new does not', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- const result = isMigrationNeeded();
-
- expect(result).toBe(true);
- });
-
- it('should return false when new directory exists', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result = isMigrationNeeded();
-
- expect(result).toBe(false);
- });
-
- it('should return false when neither directory exists', () => {
- mockHomedir(testDir);
-
- const result = isMigrationNeeded();
-
- expect(result).toBe(false);
- });
-
- it('should return false when both directories exist', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
- createDir(testLegacyDir);
-
- const result = isMigrationNeeded();
-
- expect(result).toBe(false);
- });
-
- it('should not modify filesystem', () => {
- mockHomedir(testDir);
-
- isMigrationNeeded();
-
- expect(fs.existsSync(testNewDir)).toBe(false);
- expect(fs.existsSync(testLegacyDir)).toBe(false);
- });
-
- it('should not trigger cache or warnings', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- isMigrationNeeded();
-
- expect(logger.warn).not.toHaveBeenCalled();
- });
- });
-
- describe('getMigrationInfo()', () => {
- it('should return complete info when only new directory exists', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const info = getMigrationInfo();
-
- expect(info).toEqual({
- newDir: testNewDir,
- legacyDir: testLegacyDir,
- newDirExists: true,
- legacyDirExists: false,
- migrationNeeded: false,
- currentlyUsing: testNewDir,
- });
- });
-
- it('should return complete info when only legacy directory exists', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- const info = getMigrationInfo();
-
- expect(info).toEqual({
- newDir: testNewDir,
- legacyDir: testLegacyDir,
- newDirExists: false,
- legacyDirExists: true,
- migrationNeeded: true,
- currentlyUsing: testLegacyDir,
- });
- });
-
- it('should return complete info when neither directory exists', () => {
- mockHomedir(testDir);
-
- const info = getMigrationInfo();
-
- expect(info.newDir).toBe(testNewDir);
- expect(info.legacyDir).toBe(testLegacyDir);
- expect(info.newDirExists).toBe(false);
- expect(info.legacyDirExists).toBe(false);
- expect(info.migrationNeeded).toBe(false);
- expect(info.currentlyUsing).toBe(testNewDir); // Creates new dir
- // Verify new directory was created
- expect(fs.existsSync(testNewDir)).toBe(true);
- });
-
- it('should return complete info when both directories exist', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
- createDir(testLegacyDir);
-
- const info = getMigrationInfo();
-
- expect(info).toEqual({
- newDir: testNewDir,
- legacyDir: testLegacyDir,
- newDirExists: true,
- legacyDirExists: true,
- migrationNeeded: false,
- currentlyUsing: testNewDir,
- });
- });
-
- it('should have correct property types', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const info = getMigrationInfo();
-
- expect(typeof info.newDir).toBe('string');
- expect(typeof info.legacyDir).toBe('string');
- expect(typeof info.newDirExists).toBe('boolean');
- expect(typeof info.legacyDirExists).toBe('boolean');
- expect(typeof info.migrationNeeded).toBe('boolean');
- expect(typeof info.currentlyUsing).toBe('string');
- });
-
- it('should call getDataDirectory() internally', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const info = getMigrationInfo();
-
- // currentlyUsing should be the result of getDataDirectory()
- expect(info.currentlyUsing).toBe(getDataDirectory());
- });
- });
-
- describe('Integration scenarios', () => {
- it('should handle complete migration workflow', () => {
- mockHomedir(testDir);
-
- // Step 1: Initial setup - no directories exist
- const info1 = getMigrationInfo();
- expect(info1.migrationNeeded).toBe(false);
- expect(info1.newDirExists).toBe(false);
- expect(info1.legacyDirExists).toBe(false);
-
- // Step 2: User has legacy directory
- _clearCache();
- removeDir(testNewDir);
- createDir(testLegacyDir);
-
- const info2 = getMigrationInfo();
- expect(info2.migrationNeeded).toBe(true);
- expect(info2.currentlyUsing).toBe(testLegacyDir);
-
- // Step 3: After migration - both exist
- _clearCache();
- createDir(testNewDir);
-
- const info3 = getMigrationInfo();
- expect(info3.migrationNeeded).toBe(false);
- expect(info3.currentlyUsing).toBe(testNewDir);
- });
-
- it('should work correctly with multiple getDataPath() calls', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const paths = [
- getDataPath('database.db'),
- getDataPath('config.json'),
- getDataPath('logs/app.log'),
- getDataPath('cache/temp.txt'),
- ];
-
- paths.forEach((p) => {
- expect(p.startsWith(testNewDir)).toBe(true);
- });
- });
-
- it('should maintain consistency across multiple function calls', () => {
- mockHomedir(testDir);
- createDir(testLegacyDir);
-
- const dir1 = getDataDirectory();
- const path1 = getDataPath('test.db');
- const info = getMigrationInfo();
- const dir2 = getDataDirectory();
-
- expect(dir1).toBe(dir2);
- expect(path1).toBe(path.join(dir1, 'test.db'));
- expect(info.currentlyUsing).toBe(dir1);
- });
- });
-
- describe('Edge cases', () => {
- it('should handle concurrent access (sequential calls)', () => {
- mockHomedir(testDir);
-
- // Multiple rapid calls should all return same result
- const results = Array.from({ length: 10 }, () => getDataDirectory());
- const uniqueResults = new Set(results);
-
- expect(uniqueResults.size).toBe(1);
- expect(fs.existsSync(testNewDir)).toBe(true);
- });
-
- it('should handle special characters in home directory', () => {
- const specialDir = path.join(testDir, 'path with spaces & special-chars');
- createDir(specialDir);
- mockHomedir(specialDir);
-
- const result = getDataDirectory();
-
- expect(result).toBe(path.join(specialDir, '.memesh'));
- expect(fs.existsSync(result)).toBe(true);
- });
-
- it('should handle very long nested paths', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const deepPath = 'a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/file.db';
- const result = getDataPath(deepPath);
-
- expect(result).toBe(path.join(testNewDir, deepPath));
- });
-
- it('should normalize paths correctly', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const result1 = getDataPath('./file.db');
- const result2 = getDataPath('file.db');
-
- // Both should resolve to same normalized path
- expect(path.normalize(result1)).toBe(path.normalize(result2));
- });
- });
-
- describe('Real-world usage patterns', () => {
- it('should support KnowledgeGraph use case', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const kgPath = getDataPath('knowledge-graph.db');
-
- expect(kgPath).toBe(path.join(testNewDir, 'knowledge-graph.db'));
- });
-
- it('should support TaskQueue use case', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const queuePath = getDataPath('task-queue.db');
-
- expect(queuePath).toBe(path.join(testNewDir, 'task-queue.db'));
- });
-
- it('should support multiple database files', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const files = [
- 'knowledge-graph.db',
- 'task-queue.db',
- 'evolution.db',
- 'metrics.db',
- ];
-
- const paths = files.map((f) => getDataPath(f));
-
- paths.forEach((p, i) => {
- expect(p).toBe(path.join(testNewDir, files[i]));
- });
-
- // All paths should be in same directory
- const dirs = paths.map((p) => path.dirname(p));
- expect(new Set(dirs).size).toBe(1);
- });
- });
-
- describe('Performance considerations', () => {
- it('should be efficient with cached results', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const startTime = Date.now();
-
- // Make many calls - should be very fast due to caching
- for (let i = 0; i < 1000; i++) {
- getDataDirectory();
- getDataPath(`file${i}.db`);
- }
-
- const elapsed = Date.now() - startTime;
-
- // With caching, 2000 calls should be very fast (< 100ms)
- expect(elapsed).toBeLessThan(100);
- });
-
- it('should minimize filesystem checks with caching', () => {
- mockHomedir(testDir);
- createDir(testNewDir);
-
- const existsSpy = vi.spyOn(fs, 'existsSync');
-
- // First call checks filesystem
- getDataDirectory();
- const checksAfterFirst = existsSpy.mock.calls.length;
-
- // Additional calls should not check filesystem
- getDataDirectory();
- getDataDirectory();
- getDataDirectory();
-
- expect(existsSpy.mock.calls.length).toBe(checksAfterFirst);
-
- existsSpy.mockRestore();
- });
- });
-});
diff --git a/tests/unit/ProgressReporter.test.ts b/tests/unit/ProgressReporter.test.ts
deleted file mode 100644
index a0fde4c5..00000000
--- a/tests/unit/ProgressReporter.test.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-// tests/unit/ProgressReporter.test.ts
-import { describe, it, expect, vi } from 'vitest';
-import { ProgressReporter } from '../../src/mcp/ProgressReporter';
-
-describe('ProgressReporter', () => {
- it('should report progress updates', async () => {
- const mockSendProgress = vi.fn();
- const reporter = new ProgressReporter('test-token', mockSendProgress);
-
- await reporter.report(1, 5, 'Processing task 1/5');
-
- expect(mockSendProgress).toHaveBeenCalledWith({
- progressToken: 'test-token',
- progress: 1,
- total: 5,
- });
- });
-
- it('should not report when token is missing', async () => {
- const mockSendProgress = vi.fn();
- const reporter = new ProgressReporter(undefined, mockSendProgress);
-
- await reporter.report(1, 5, 'Processing');
-
- expect(mockSendProgress).not.toHaveBeenCalled();
- });
-});
diff --git a/tests/unit/SamplingClient.test.ts b/tests/unit/SamplingClient.test.ts
deleted file mode 100644
index 3cd974f0..00000000
--- a/tests/unit/SamplingClient.test.ts
+++ /dev/null
@@ -1,85 +0,0 @@
-// tests/unit/SamplingClient.test.ts
-import { describe, it, expect, vi } from 'vitest';
-import { SamplingClient } from '../../src/mcp/SamplingClient';
-
-describe('SamplingClient', () => {
- it('should create sampling request', async () => {
- const mockSample = vi.fn().mockResolvedValue({
- role: 'assistant',
- content: { type: 'text', text: 'Generated content' }
- });
-
- const client = new SamplingClient(mockSample);
- const result = await client.generate('Test prompt', { maxTokens: 100 });
-
- expect(result).toBe('Generated content');
- expect(mockSample).toHaveBeenCalledWith({
- messages: [{ role: 'user', content: 'Test prompt' }],
- maxTokens: 100,
- });
- });
-
- it('should generate with conversation history', async () => {
- const mockSample = vi.fn().mockResolvedValue({
- role: 'assistant',
- content: { type: 'text', text: 'Response to conversation' }
- });
-
- const client = new SamplingClient(mockSample);
- const messages = [
- { role: 'user' as const, content: 'Hello' },
- { role: 'assistant' as const, content: 'Hi there' },
- { role: 'user' as const, content: 'How are you?' },
- ];
- const result = await client.generateWithHistory(messages, { maxTokens: 100 });
-
- expect(result).toBe('Response to conversation');
- expect(mockSample).toHaveBeenCalledWith({
- messages,
- maxTokens: 100,
- });
- });
-
- it('should throw error for empty prompt', async () => {
- const mockSample = vi.fn();
- const client = new SamplingClient(mockSample);
-
- await expect(
- client.generate('', { maxTokens: 100 })
- ).rejects.toThrow('Prompt cannot be empty');
-
- expect(mockSample).not.toHaveBeenCalled();
- });
-
- it('should throw error for invalid maxTokens', async () => {
- const mockSample = vi.fn();
- const client = new SamplingClient(mockSample);
-
- await expect(
- client.generate('Test', { maxTokens: 0 })
- ).rejects.toThrow('maxTokens must be positive');
- });
-
- it('should throw error for malformed response', async () => {
- const mockSample = vi.fn().mockResolvedValue({
- role: 'assistant',
- content: null // Malformed
- });
-
- const client = new SamplingClient(mockSample);
-
- await expect(
- client.generate('Test', { maxTokens: 100 })
- ).rejects.toThrow('Invalid response from sampling function');
- });
-
- it('should propagate sampling function errors', async () => {
- const mockSample = vi.fn().mockRejectedValue(new Error('Network error'));
-
- const client = new SamplingClient(mockSample);
-
- await expect(
- client.generate('Test', { maxTokens: 100 })
- ).rejects.toThrow('Sampling failed: Network error');
- });
-});
diff --git a/tests/unit/TestGenerator.test.ts b/tests/unit/TestGenerator.test.ts
deleted file mode 100644
index 72497040..00000000
--- a/tests/unit/TestGenerator.test.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-// tests/unit/TestGenerator.test.ts
-import { describe, it, expect, vi } from 'vitest';
-import { TestGenerator } from '../../src/tools/TestGenerator';
-
-describe('TestGenerator', () => {
- it('should generate test cases from specification', async () => {
- const mockGenerate = vi.fn().mockResolvedValue(`
-describe('Calculator', () => {
- it('should add two numbers', () => {
- expect(add(1, 2)).toBe(3);
- });
-});
- `);
-
- const generator = new TestGenerator({ generate: mockGenerate } as any);
- const result = await generator.generateTests('Add function that sums two numbers');
-
- expect(result).toContain('describe');
- expect(result).toContain('add(1, 2)');
- });
-});
diff --git a/tests/unit/cloud/MeMeshCloudClient.test.ts b/tests/unit/cloud/MeMeshCloudClient.test.ts
deleted file mode 100644
index 3a32e3ca..00000000
--- a/tests/unit/cloud/MeMeshCloudClient.test.ts
+++ /dev/null
@@ -1,352 +0,0 @@
-import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
-import {
- MeMeshCloudClient,
- getCloudClient,
- isCloudEnabled,
- resetCloudClient,
-} from '../../../src/cloud/MeMeshCloudClient.js';
-
-// Mock fs to prevent credentials file from interfering with tests.
-// On dev machines with a real credentials file (~/.config/memesh/credentials.json),
-// the MeMeshCloudClient constructor falls back to reading it when apiKey is empty,
-// making tests environment-dependent. This mock ensures test isolation.
-vi.mock('fs', async () => {
- const actual = await vi.importActual('fs');
- return {
- ...actual,
- existsSync: vi.fn((p: string) => {
- // Block credentials file reads; allow everything else
- if (typeof p === 'string' && p.endsWith('memesh/credentials.json')) return false;
- return actual.existsSync(p);
- }),
- };
-});
-
-describe('MeMeshCloudClient', () => {
- let client: MeMeshCloudClient;
-
- beforeEach(() => {
- client = new MeMeshCloudClient('mk_test_key_123', 'https://test.api.memesh.ai', 5000);
- });
-
- afterEach(() => {
- vi.restoreAllMocks();
- });
-
- describe('constructor', () => {
- it('should create client with explicit parameters', () => {
- const c = new MeMeshCloudClient('key', 'https://example.com/', 3000);
- expect(c.isConfigured).toBe(true);
- });
-
- it('should strip trailing slash from base URL', () => {
- const c = new MeMeshCloudClient('key', 'https://example.com/');
- expect(c.isConfigured).toBe(true);
- });
-
- it('should report not configured when no API key', () => {
- const c = new MeMeshCloudClient('', 'https://example.com');
- expect(c.isConfigured).toBe(false);
- });
- });
-
- describe('isConfigured', () => {
- it('should return true when API key is set', () => {
- expect(client.isConfigured).toBe(true);
- });
-
- it('should return false when API key is empty', () => {
- const noKeyClient = new MeMeshCloudClient('');
- expect(noKeyClient.isConfigured).toBe(false);
- });
- });
-
- describe('authenticate', () => {
- it('should return invalid when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- const result = await noKeyClient.authenticate();
- expect(result.valid).toBe(false);
- });
-
- it('should call /agents/me and return agent info on success', async () => {
- const mockAgent = {
- id: 'agent-1',
- agentType: 'claude',
- agentName: 'Test Agent',
- status: 'online',
- capabilities: {},
- createdAt: '2026-01-01T00:00:00Z',
- };
- vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify(mockAgent), { status: 200 })
- );
-
- const result = await client.authenticate();
- expect(result.valid).toBe(true);
- expect(result.agentId).toBe('agent-1');
- expect(result.agentType).toBe('claude');
- });
-
- it('should return invalid on auth failure', async () => {
- vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response('Unauthorized', { status: 401 })
- );
-
- const result = await client.authenticate();
- expect(result.valid).toBe(false);
- });
- });
-
- describe('writeMemory', () => {
- it('should throw when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- await expect(noKeyClient.writeMemory({
- content: 'test',
- space: 'default',
- })).rejects.toThrow('MeMesh Cloud API key not configured');
- });
-
- it('should POST to /memory/write with x-api-key header', async () => {
- const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify({ id: 'mem-1' }), { status: 200 })
- );
-
- const id = await client.writeMemory({
- content: 'Test memory',
- space: 'default',
- tags: ['test'],
- });
-
- expect(id).toBe('mem-1');
- expect(fetchSpy).toHaveBeenCalledWith(
- 'https://test.api.memesh.ai/memory/write',
- expect.objectContaining({
- method: 'POST',
- headers: expect.objectContaining({
- 'x-api-key': 'mk_test_key_123',
- }),
- })
- );
- });
- });
-
- describe('writeMemories (batch)', () => {
- it('should POST batch to /memory/batch endpoint', async () => {
- const mockResult = {
- total: 2,
- succeeded: 2,
- failed: 0,
- successes: [
- { index: 0, id: 'm1', content: 'Memory 1', createdAt: '2026-01-01' },
- { index: 1, id: 'm2', content: 'Memory 2', createdAt: '2026-01-01' },
- ],
- failures: [],
- transactional: false,
- };
- const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify(mockResult), { status: 200 })
- );
-
- const result = await client.writeMemories([
- { content: 'Memory 1', space: 'default', tags: ['a'] },
- { content: 'Memory 2', space: 'default', tags: ['b'] },
- ]);
-
- expect(result.total).toBe(2);
- expect(result.succeeded).toBe(2);
- expect(result.failed).toBe(0);
- expect(result.successes).toHaveLength(2);
-
- // Verify batch payload shape
- const body = JSON.parse(fetchSpy.mock.calls[0][1]?.body as string);
- expect(body.memories).toHaveLength(2);
- expect(body.transactional).toBe(false);
- });
-
- it('should throw when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- await expect(noKeyClient.writeMemories([{
- content: 'test',
- space: 'default',
- }])).rejects.toThrow('MeMesh Cloud API key not configured');
- });
- });
-
- describe('searchMemory', () => {
- it('should GET /memory/search with query params', async () => {
- const mockResult = [
- { id: 'm1', content: 'Result', space: 'default', tags: ['a'], createdAt: '2026-01-01' },
- ];
- const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify(mockResult), { status: 200 })
- );
-
- const results = await client.searchMemory('api design', { limit: 10 });
-
- expect(results).toHaveLength(1);
- expect(results[0].id).toBe('m1');
-
- const calledUrl = fetchSpy.mock.calls[0][0] as string;
- expect(calledUrl).toContain('/memory/search');
- expect(calledUrl).toContain('query=api+design');
- expect(calledUrl).toContain('limit=10');
- });
-
- it('should pass spaces filter when provided', async () => {
- const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify([]), { status: 200 })
- );
-
- await client.searchMemory('test', { spaces: ['work', 'personal'] });
-
- const calledUrl = fetchSpy.mock.calls[0][0] as string;
- expect(calledUrl).toContain('spaces=work%2Cpersonal');
- });
-
- it('should throw when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- await expect(noKeyClient.searchMemory('test'))
- .rejects.toThrow('MeMesh Cloud API key not configured');
- });
- });
-
- describe('getSyncStatus', () => {
- it('should return disconnected when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- const result = await noKeyClient.getSyncStatus(42);
- expect(result.connected).toBe(false);
- expect(result.localCount).toBe(42);
- expect(result.cloudCount).toBe(0);
- });
-
- it('should return sync status from /memory/count', async () => {
- vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify({ count: 100 }), { status: 200 })
- );
-
- const result = await client.getSyncStatus(50);
- expect(result.connected).toBe(true);
- expect(result.localCount).toBe(50);
- expect(result.cloudCount).toBe(100);
- });
-
- it('should return disconnected on network error', async () => {
- vi.spyOn(globalThis, 'fetch').mockRejectedValueOnce(new Error('Network error'));
-
- const result = await client.getSyncStatus(50);
- expect(result.connected).toBe(false);
- });
- });
-
- describe('registerAgent', () => {
- it('should POST to /agents/register and return CloudAgentInfo', async () => {
- const mockAgent = {
- id: 'agent-1',
- agentType: 'claude',
- agentName: 'Test Agent',
- status: 'online',
- capabilities: { platform: 'claude-code' },
- createdAt: '2026-01-01T00:00:00Z',
- };
- const fetchSpy = vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(JSON.stringify(mockAgent), { status: 200 })
- );
-
- const result = await client.registerAgent({
- agentType: 'claude',
- agentName: 'Test Agent',
- capabilities: { platform: 'claude-code' },
- });
-
- expect(result.id).toBe('agent-1');
- expect(result.agentType).toBe('claude');
-
- const calledUrl = fetchSpy.mock.calls[0][0] as string;
- expect(calledUrl).toContain('/agents/register');
- });
-
- it('should throw when not configured', async () => {
- const noKeyClient = new MeMeshCloudClient('');
- await expect(noKeyClient.registerAgent({
- agentType: 'test',
- })).rejects.toThrow('MeMesh Cloud API key not configured');
- });
- });
-
- describe('error handling', () => {
- it('should throw ExternalServiceError on HTTP errors', async () => {
- vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response('Internal Server Error', { status: 500 })
- );
-
- await expect(client.writeMemory({
- content: 'test',
- space: 'default',
- })).rejects.toThrow('MeMesh Cloud API error: 500');
- });
-
- it('should truncate long error bodies to 200 chars', async () => {
- const longBody = 'x'.repeat(500);
- vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce(
- new Response(longBody, { status: 500 })
- );
-
- try {
- await client.writeMemory({ content: 'test', space: 'default' });
- expect.fail('Should have thrown');
- } catch (error: unknown) {
- const err = error as Error & { context?: Record };
- expect(err.context?.response).toBeDefined();
- const responseStr = String(err.context?.response);
- expect(responseStr.length).toBeLessThanOrEqual(204); // 200 + '...'
- }
- });
-
- it('should throw timeout error on abort', async () => {
- vi.spyOn(globalThis, 'fetch').mockRejectedValueOnce(
- new DOMException('The operation was aborted', 'AbortError')
- );
-
- await expect(client.writeMemory({
- content: 'test',
- space: 'default',
- })).rejects.toThrow('timed out');
- });
-
- });
-});
-
-describe('getCloudClient / isCloudEnabled / resetCloudClient', () => {
- afterEach(() => {
- resetCloudClient();
- vi.restoreAllMocks();
- });
-
- it('isCloudEnabled should return false when MEMESH_API_KEY not set', () => {
- const orig = process.env.MEMESH_API_KEY;
- delete process.env.MEMESH_API_KEY;
- expect(isCloudEnabled()).toBe(false);
- if (orig) process.env.MEMESH_API_KEY = orig;
- });
-
- it('isCloudEnabled should return true when MEMESH_API_KEY is set', () => {
- const orig = process.env.MEMESH_API_KEY;
- process.env.MEMESH_API_KEY = 'mk_test';
- expect(isCloudEnabled()).toBe(true);
- if (orig) process.env.MEMESH_API_KEY = orig;
- else delete process.env.MEMESH_API_KEY;
- });
-
- it('getCloudClient should return a singleton', () => {
- const a = getCloudClient();
- const b = getCloudClient();
- expect(a).toBe(b);
- });
-
- it('resetCloudClient should clear the singleton', () => {
- const a = getCloudClient();
- resetCloudClient();
- const b = getCloudClient();
- expect(a).not.toBe(b);
- });
-});
diff --git a/tests/unit/core/HookIntegration.test.ts b/tests/unit/core/HookIntegration.test.ts
deleted file mode 100644
index fba8c9f0..00000000
--- a/tests/unit/core/HookIntegration.test.ts
+++ /dev/null
@@ -1,631 +0,0 @@
-/**
- * HookIntegration Tests
- *
- * Tests for the Claude Code Hooks bridge that monitors tool execution and
- * automatically triggers workflow checkpoints.
- *
- * Test coverage:
- * - Constructor / initialization
- * - detectCheckpointFromToolUse: Write, Edit, Bash (test/git), null cases
- * - processToolUse: checkpoint triggering, callbacks, project memory recording
- * - onButlerTrigger: callback registration and execution
- * - setProjectMemory: late initialization guard
- * - Token tracking via processToolUse
- * - Error auto-detection from command output
- * - Edge cases (failed tools, missing args, non-checkpoint tools)
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import { HookIntegration } from '../../../src/core/HookIntegration.js';
-import { CheckpointDetector } from '../../../src/core/CheckpointDetector.js';
-import { ProjectAutoTracker } from '../../../src/memory/ProjectAutoTracker.js';
-import type { ToolUseData, CheckpointContext } from '../../../src/core/HookIntegration.js';
-import type { MCPToolInterface } from '../../../src/core/MCPToolInterface.js';
-
-// ─── Mock Factories ──────────────────────────────────────────────────────────
-
-function createMockMcp(): MCPToolInterface {
- return {
- memory: {
- createEntities: vi.fn().mockResolvedValue(undefined),
- searchNodes: vi.fn().mockResolvedValue([]),
- },
- } as unknown as MCPToolInterface;
-}
-
-function createMockDetector(): CheckpointDetector {
- const detector = new CheckpointDetector();
- // Register all checkpoints that HookIntegration may trigger
- const checkpoints = ['code-written', 'test-complete', 'commit-ready', 'committed'];
- for (const name of checkpoints) {
- detector.registerCheckpoint(name, async () => ({ success: true }));
- }
- return detector;
-}
-
-function createMockTracker(): ProjectAutoTracker {
- const mcp = createMockMcp();
- return new ProjectAutoTracker(mcp);
-}
-
-// ─── Common ToolUseData fixtures ─────────────────────────────────────────────
-
-const writeToolData: ToolUseData = {
- toolName: 'Write',
- arguments: { file_path: 'src/api/users.ts', content: 'export const foo = 1;' },
- success: true,
- duration: 120,
- tokensUsed: 800,
-};
-
-const editToolData: ToolUseData = {
- toolName: 'Edit',
- arguments: { file_path: 'src/utils.ts', old_string: 'old', new_string: 'new' },
- success: true,
- duration: 80,
-};
-
-const testBashData: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'npx vitest run' },
- success: true,
- output: '10 passed, 0 failed',
-};
-
-const gitAddBashData: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'git add .' },
- success: true,
-};
-
-const gitCommitBashData: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'git commit -m "feat: add users"' },
- success: true,
- output: '[main abc1234] feat: add users',
-};
-
-// ─── Suite ───────────────────────────────────────────────────────────────────
-
-describe('HookIntegration', () => {
- let detector: CheckpointDetector;
- let hooks: HookIntegration;
-
- beforeEach(() => {
- detector = createMockDetector();
- hooks = new HookIntegration(detector);
- });
-
- afterEach(() => {
- vi.restoreAllMocks();
- });
-
- // ── Constructor ─────────────────────────────────────────────────────────────
-
- describe('constructor', () => {
- it('should instantiate without errors when only detector is provided', () => {
- expect(() => new HookIntegration(detector)).not.toThrow();
- });
-
- it('should instantiate without errors when projectAutoTracker is provided', () => {
- const tracker = createMockTracker();
- expect(() => new HookIntegration(detector, tracker)).not.toThrow();
- });
- });
-
- // ── detectCheckpointFromToolUse ──────────────────────────────────────────────
-
- describe('detectCheckpointFromToolUse()', () => {
- describe('Write tool', () => {
- it('should detect code-written checkpoint for Write tool', async () => {
- const result = await hooks.detectCheckpointFromToolUse(writeToolData);
-
- expect(result).not.toBeNull();
- expect(result!.name).toBe('code-written');
- });
-
- it('should include the file path in checkpoint data', async () => {
- const result = await hooks.detectCheckpointFromToolUse(writeToolData);
-
- expect(result!.data.files).toEqual(['src/api/users.ts']);
- });
-
- it('should set type to new-file for Write tool', async () => {
- const result = await hooks.detectCheckpointFromToolUse(writeToolData);
-
- expect(result!.data.type).toBe('new-file');
- });
-
- it('should mark test files with hasTests = true', async () => {
- const testFileData: ToolUseData = {
- toolName: 'Write',
- arguments: { file_path: 'src/api.test.ts', content: '' },
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(testFileData);
-
- expect(result!.data.hasTests).toBe(true);
- });
-
- it('should mark non-test files with hasTests = false', async () => {
- const result = await hooks.detectCheckpointFromToolUse(writeToolData);
- expect(result!.data.hasTests).toBe(false);
- });
- });
-
- describe('Edit tool', () => {
- it('should detect code-written checkpoint for Edit tool', async () => {
- const result = await hooks.detectCheckpointFromToolUse(editToolData);
-
- expect(result).not.toBeNull();
- expect(result!.name).toBe('code-written');
- });
-
- it('should set type to modification for Edit tool', async () => {
- const result = await hooks.detectCheckpointFromToolUse(editToolData);
-
- expect(result!.data.type).toBe('modification');
- });
-
- it('should include the file path in Edit checkpoint data', async () => {
- const result = await hooks.detectCheckpointFromToolUse(editToolData);
-
- expect(result!.data.files).toEqual(['src/utils.ts']);
- });
- });
-
- describe('Bash tool — test commands', () => {
- it('should detect test-complete for npm test command', async () => {
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'npm test' },
- success: true,
- output: '5 passed',
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result).not.toBeNull();
- expect(result!.name).toBe('test-complete');
- });
-
- it('should detect test-complete for vitest command', async () => {
- const result = await hooks.detectCheckpointFromToolUse(testBashData);
-
- expect(result!.name).toBe('test-complete');
- });
-
- it('should detect test-complete for jest command', async () => {
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'jest --coverage' },
- success: true,
- output: '3 passed',
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result!.name).toBe('test-complete');
- });
-
- it('should include parsed test counts in checkpoint data', async () => {
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'npm test' },
- success: true,
- output: '34 tests passed, 2 tests failed',
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result!.data.total).toBe(36);
- expect(result!.data.passed).toBe(34);
- expect(result!.data.failed).toBe(2);
- });
-
- it('should handle empty test output gracefully (zero counts)', async () => {
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'npm test' },
- success: true,
- output: '',
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result!.name).toBe('test-complete');
- expect(result!.data.total).toBe(0);
- });
- });
-
- describe('Bash tool — git commands', () => {
- it('should detect commit-ready for git add command', async () => {
- const result = await hooks.detectCheckpointFromToolUse(gitAddBashData);
-
- expect(result).not.toBeNull();
- expect(result!.name).toBe('commit-ready');
- });
-
- it('should detect committed for git commit command', async () => {
- const result = await hooks.detectCheckpointFromToolUse(gitCommitBashData);
-
- expect(result).not.toBeNull();
- expect(result!.name).toBe('committed');
- });
-
- it('should extract commit message from git commit command', async () => {
- const result = await hooks.detectCheckpointFromToolUse(gitCommitBashData);
-
- expect(result!.data.message).toBe('feat: add users');
- });
-
- it('should include the git commit command in checkpoint data', async () => {
- const result = await hooks.detectCheckpointFromToolUse(gitCommitBashData);
-
- expect(result!.data.command).toContain('git commit');
- });
-
- it('should detect committed before test-complete when git commit also triggers test patterns', async () => {
- // git commit takes priority over test pattern check
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'git commit -m "test: add coverage"' },
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result!.name).toBe('committed');
- });
- });
-
- describe('null / no-checkpoint cases', () => {
- it('should return null for failed tool execution', async () => {
- const failedTool: ToolUseData = {
- toolName: 'Write',
- arguments: { file_path: 'src/api.ts', content: '' },
- success: false,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(failedTool);
-
- expect(result).toBeNull();
- });
-
- it('should return null for Read tool (non-checkpoint)', async () => {
- const readTool: ToolUseData = {
- toolName: 'Read',
- arguments: { file_path: 'README.md' },
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(readTool);
-
- expect(result).toBeNull();
- });
-
- it('should return null for Bash tool with non-checkpoint command', async () => {
- const data: ToolUseData = {
- toolName: 'Bash',
- arguments: { command: 'ls -la' },
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result).toBeNull();
- });
-
- it('should return null when arguments are missing', async () => {
- const data: ToolUseData = {
- toolName: 'Write',
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result).toBeNull();
- });
-
- it('should return null when arguments is a non-object primitive', async () => {
- const data: ToolUseData = {
- toolName: 'Write',
- arguments: 'string-arg' as unknown,
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result).toBeNull();
- });
-
- it('should return null for unknown tool names', async () => {
- const data: ToolUseData = {
- toolName: 'UnknownTool',
- arguments: { something: 'value' },
- success: true,
- };
-
- const result = await hooks.detectCheckpointFromToolUse(data);
-
- expect(result).toBeNull();
- });
- });
- });
-
- // ── processToolUse ────────────────────────────────────────────────────────────
-
- describe('processToolUse()', () => {
- it('should trigger checkpoint through detector for Write tool', async () => {
- const triggerSpy = vi.spyOn(detector, 'triggerCheckpoint');
-
- await hooks.processToolUse(writeToolData);
-
- expect(triggerSpy).toHaveBeenCalledWith('code-written', expect.any(Object));
- });
-
- it('should trigger checkpoint through detector for Bash git commit', async () => {
- const triggerSpy = vi.spyOn(detector, 'triggerCheckpoint');
-
- await hooks.processToolUse(gitCommitBashData);
-
- expect(triggerSpy).toHaveBeenCalledWith('committed', expect.any(Object));
- });
-
- it('should not trigger checkpoint for failed tool', async () => {
- const triggerSpy = vi.spyOn(detector, 'triggerCheckpoint');
- const failedData: ToolUseData = { ...writeToolData, success: false };
-
- await hooks.processToolUse(failedData);
-
- expect(triggerSpy).not.toHaveBeenCalled();
- });
-
- it('should not trigger checkpoint for non-checkpoint tool', async () => {
- const triggerSpy = vi.spyOn(detector, 'triggerCheckpoint');
- const readData: ToolUseData = {
- toolName: 'Read',
- arguments: { file_path: 'README.md' },
- success: true,
- };
-
- await hooks.processToolUse(readData);
-
- expect(triggerSpy).not.toHaveBeenCalled();
- });
-
- it('should execute all registered onButlerTrigger callbacks when checkpoint fires', async () => {
- const callback1 = vi.fn();
- const callback2 = vi.fn();
-
- hooks.onButlerTrigger(callback1);
- hooks.onButlerTrigger(callback2);
-
- await hooks.processToolUse(writeToolData);
-
- expect(callback1).toHaveBeenCalledOnce();
- expect(callback2).toHaveBeenCalledOnce();
- });
-
- it('should pass correct CheckpointContext to callbacks', async () => {
- let capturedContext: CheckpointContext | null = null;
-
- hooks.onButlerTrigger((ctx) => {
- capturedContext = ctx;
- });
-
- await hooks.processToolUse(writeToolData);
-
- expect(capturedContext).not.toBeNull();
- expect(capturedContext!.checkpoint).toBe('code-written');
- expect(capturedContext!.toolName).toBe('Write');
- expect(capturedContext!.data).toHaveProperty('files');
- });
-
- it('should not call callbacks when no checkpoint is detected', async () => {
- const callback = vi.fn();
- hooks.onButlerTrigger(callback);
-
- const readData: ToolUseData = {
- toolName: 'Read',
- arguments: { file_path: 'docs.md' },
- success: true,
- };
-
- await hooks.processToolUse(readData);
-
- expect(callback).not.toHaveBeenCalled();
- });
-
- it('should track token usage via project memory when provided', async () => {
- const tracker = createMockTracker();
- const tokenHookSpy = vi.spyOn(tracker, 'createTokenHook');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse({
- ...writeToolData,
- tokensUsed: 1200,
- });
-
- expect(tokenHookSpy).toHaveBeenCalled();
- });
-
- it('should not call addTokens when no tokensUsed field provided', async () => {
- const tracker = createMockTracker();
- const addTokensSpy = vi.spyOn(tracker, 'addTokens');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse({
- toolName: 'Read',
- arguments: { file_path: 'README.md' },
- success: true,
- // No tokensUsed
- });
-
- expect(addTokensSpy).not.toHaveBeenCalled();
- });
-
- it('should record code change to project memory on code-written checkpoint', async () => {
- const tracker = createMockTracker();
- const fileChangeHookSpy = vi.spyOn(tracker, 'createFileChangeHook');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse(writeToolData);
-
- expect(fileChangeHookSpy).toHaveBeenCalled();
- });
-
- it('should record test results to project memory on test-complete checkpoint', async () => {
- const tracker = createMockTracker();
- const testResultHookSpy = vi.spyOn(tracker, 'createTestResultHook');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse(testBashData);
-
- expect(testResultHookSpy).toHaveBeenCalled();
- });
-
- describe('Error auto-detection from output', () => {
- it('should attempt error recording when output contains "error:" pattern', async () => {
- const tracker = createMockTracker();
- const recordErrorSpy = vi.spyOn(tracker, 'recordError');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse({
- toolName: 'Bash',
- arguments: { command: 'npm run build' },
- success: true,
- output: 'TypeError: Cannot read property "foo" of undefined',
- });
-
- expect(recordErrorSpy).toHaveBeenCalled();
- });
-
- it('should not attempt error recording when output has no error patterns', async () => {
- const tracker = createMockTracker();
- const recordErrorSpy = vi.spyOn(tracker, 'recordError');
- const hooksWithMemory = new HookIntegration(detector, tracker);
-
- await hooksWithMemory.processToolUse({
- toolName: 'Bash',
- arguments: { command: 'ls -la' },
- success: true,
- output: 'total 48\ndrwxr-xr-x 5 user group 160',
- });
-
- expect(recordErrorSpy).not.toHaveBeenCalled();
- });
-
- it('should not record error when projectAutoTracker is absent', async () => {
- const hooksWithoutTracker = new HookIntegration(detector);
-
- // Should not throw even with error output and no tracker
- await expect(hooksWithoutTracker.processToolUse({
- toolName: 'Bash',
- arguments: { command: 'npm run build' },
- success: true,
- output: 'Build failed: error in module',
- })).resolves.toBeUndefined();
- });
- });
- });
-
- // ── onButlerTrigger ──────────────────────────────────────────────────────────
-
- describe('onButlerTrigger()', () => {
- it('should register a callback and invoke it on checkpoint', async () => {
- const callback = vi.fn();
- hooks.onButlerTrigger(callback);
-
- await hooks.processToolUse(writeToolData);
-
- expect(callback).toHaveBeenCalledOnce();
- });
-
- it('should allow multiple callbacks to be registered', async () => {
- const cb1 = vi.fn();
- const cb2 = vi.fn();
- const cb3 = vi.fn();
-
- hooks.onButlerTrigger(cb1);
- hooks.onButlerTrigger(cb2);
- hooks.onButlerTrigger(cb3);
-
- await hooks.processToolUse(writeToolData);
-
- expect(cb1).toHaveBeenCalledOnce();
- expect(cb2).toHaveBeenCalledOnce();
- expect(cb3).toHaveBeenCalledOnce();
- });
-
- it('should fire callbacks for every checkpoint triggered', async () => {
- const callback = vi.fn();
- hooks.onButlerTrigger(callback);
-
- await hooks.processToolUse(writeToolData); // code-written
- await hooks.processToolUse(testBashData); // test-complete
- await hooks.processToolUse(gitCommitBashData); // committed
-
- expect(callback).toHaveBeenCalledTimes(3);
- });
-
- it('should include checkpoint name in each callback context', async () => {
- const names: string[] = [];
- hooks.onButlerTrigger(ctx => names.push(ctx.checkpoint));
-
- await hooks.processToolUse(writeToolData);
- await hooks.processToolUse(testBashData);
- await hooks.processToolUse(gitAddBashData);
-
- expect(names).toContain('code-written');
- expect(names).toContain('test-complete');
- expect(names).toContain('commit-ready');
- });
- });
-
- // ── setProjectMemory ─────────────────────────────────────────────────────────
-
- describe('setProjectMemory()', () => {
- it('should set project memory when not already set', () => {
- const tracker = createMockTracker();
- hooks.setProjectMemory(tracker);
- // No throw = success; internal state verified via token tracking
- expect(() => hooks.setProjectMemory(tracker)).not.toThrow();
- });
-
- it('should not overwrite already-set project memory', async () => {
- const tracker1 = createMockTracker();
- const tracker2 = createMockTracker();
-
- const hooksWithTracker = new HookIntegration(detector, tracker1);
-
- // Try to override — should be ignored
- hooksWithTracker.setProjectMemory(tracker2);
-
- // The original tracker1 should be used for token tracking
- const addTokens1 = vi.spyOn(tracker1, 'addTokens');
- const addTokens2 = vi.spyOn(tracker2, 'addTokens');
-
- await hooksWithTracker.processToolUse({ ...writeToolData, tokensUsed: 100 });
-
- expect(addTokens1).toHaveBeenCalled();
- expect(addTokens2).not.toHaveBeenCalled();
- });
- });
-
- // ── Checkpoint detector integration ──────────────────────────────────────────
-
- describe('CheckpointDetector integration', () => {
- it('should throw when triggered checkpoint is not registered in detector', async () => {
- const emptyDetector = new CheckpointDetector();
- const hooksWithEmptyDetector = new HookIntegration(emptyDetector);
-
- await expect(hooksWithEmptyDetector.processToolUse(writeToolData)).rejects.toThrow();
- });
-
- it('should succeed when all required checkpoints are registered', async () => {
- await expect(hooks.processToolUse(writeToolData)).resolves.toBeUndefined();
- });
- });
-});
diff --git a/tests/unit/db/BetterSqlite3Adapter.test.ts b/tests/unit/db/BetterSqlite3Adapter.test.ts
deleted file mode 100644
index 991cb352..00000000
--- a/tests/unit/db/BetterSqlite3Adapter.test.ts
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * BetterSqlite3Adapter Unit Tests
- *
- * Tests for the quick_check PRAGMA validation added to the create() method.
- * Covers: file-based DB success, in-memory DB skip, and quick_check failure path.
- */
-
-import { describe, it, expect, vi, afterEach } from 'vitest';
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-import { BetterSqlite3Adapter } from '../../../src/db/adapters/BetterSqlite3Adapter.js';
-
-describe('BetterSqlite3Adapter', () => {
- const tmpDir = os.tmpdir();
-
- afterEach(() => {
- vi.restoreAllMocks();
- });
-
- describe('create() — quick_check validation', () => {
- it('should open a valid file-based database without error', async () => {
- const dbPath = path.join(tmpDir, `adapter-valid-${Date.now()}.db`);
-
- try {
- const adapter = await BetterSqlite3Adapter.create(dbPath);
-
- expect(adapter).toBeDefined();
- expect(adapter.open).toBe(true);
- expect(adapter.inMemory).toBe(false);
- expect(adapter.name).toBe('better-sqlite3');
-
- adapter.close();
- } finally {
- if (fs.existsSync(dbPath)) fs.unlinkSync(dbPath);
- }
- });
-
- it('should open an in-memory database without running quick_check', async () => {
- // We spy on Database.prototype.pragma to verify it is NOT called for :memory:
- const BetterSqlite3 = (await import('better-sqlite3')).default;
- const pragmaSpy = vi.spyOn(BetterSqlite3.prototype, 'pragma');
-
- const adapter = await BetterSqlite3Adapter.create(':memory:');
-
- expect(adapter).toBeDefined();
- expect(adapter.open).toBe(true);
- expect(adapter.inMemory).toBe(true);
-
- // pragma('quick_check') must NOT have been called for in-memory databases
- const quickCheckCalls = pragmaSpy.mock.calls.filter(([name]) => name === 'quick_check');
- expect(quickCheckCalls).toHaveLength(0);
-
- adapter.close();
- });
-
- it('should throw a descriptive error when quick_check returns a non-ok result', async () => {
- const dbPath = path.join(tmpDir, `adapter-corrupt-${Date.now()}.db`);
-
- // Create the DB file so better-sqlite3 can open it
- const BetterSqlite3 = (await import('better-sqlite3')).default;
-
- try {
- // Spy on pragma to simulate a failed integrity check
- vi.spyOn(BetterSqlite3.prototype, 'pragma').mockImplementation((name: string) => {
- if (name === 'quick_check') {
- return 'integrity_check_failed';
- }
- // Call original for any other pragma
- return undefined;
- });
-
- await expect(BetterSqlite3Adapter.create(dbPath)).rejects.toThrow(
- 'Database integrity check failed'
- );
- } finally {
- if (fs.existsSync(dbPath)) fs.unlinkSync(dbPath);
- }
- });
-
- it('should re-throw the integrity check error (not swallow it)', async () => {
- const dbPath = path.join(tmpDir, `adapter-check-${Date.now()}.db`);
-
- const BetterSqlite3 = (await import('better-sqlite3')).default;
-
- try {
- vi.spyOn(BetterSqlite3.prototype, 'pragma').mockImplementation((name: string) => {
- if (name === 'quick_check') {
- return 'error_on_page_1';
- }
- return undefined;
- });
-
- const rejection = BetterSqlite3Adapter.create(dbPath);
-
- await expect(rejection).rejects.toThrow('Database integrity check failed: error_on_page_1');
- } finally {
- if (fs.existsSync(dbPath)) fs.unlinkSync(dbPath);
- }
- });
- });
-});
diff --git a/tests/unit/db/ConnectionPool.test.ts b/tests/unit/db/ConnectionPool.test.ts
deleted file mode 100644
index debe49a7..00000000
--- a/tests/unit/db/ConnectionPool.test.ts
+++ /dev/null
@@ -1,572 +0,0 @@
-/**
- * ConnectionPool Unit Tests
- *
- * Comprehensive test suite for SQLite connection pooling functionality.
- * Tests cover pool initialization, connection acquisition/release,
- * concurrent access, timeout handling, health checks, and graceful shutdown.
- */
-
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-import { ConnectionPool } from '../../../src/db/ConnectionPool.js';
-import type Database from 'better-sqlite3';
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-
-// Database type is used for type assertions in tests
-
-describe('ConnectionPool', () => {
- let testDbPath: string;
- let pool: ConnectionPool;
-
- beforeEach(() => {
- // Create unique test database for each test
- const timestamp = Date.now();
- const random = Math.floor(Math.random() * 10000);
- testDbPath = path.join(os.tmpdir(), `test-pool-${timestamp}-${random}.db`);
- });
-
- afterEach(async () => {
- // Cleanup pool
- if (pool) {
- await pool.shutdown();
- }
-
- // Remove test database
- if (fs.existsSync(testDbPath)) {
- fs.unlinkSync(testDbPath);
- }
-
- // Clean up WAL and SHM files
- [testDbPath + '-wal', testDbPath + '-shm'].forEach(file => {
- if (fs.existsSync(file)) {
- fs.unlinkSync(file);
- }
- });
- });
-
- describe('Pool Initialization', () => {
- it('should create pool with default options', async () => {
- pool = await ConnectionPool.create(testDbPath);
- const stats = pool.getStats();
-
- expect(stats.total).toBe(5); // Default maxConnections
- expect(stats.idle).toBe(5); // All connections idle initially
- expect(stats.active).toBe(0);
- expect(stats.waiting).toBe(0);
- });
-
- it('should create pool with custom options', async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 2000,
- idleTimeout: 15000,
- });
-
- const stats = pool.getStats();
- expect(stats.total).toBe(3);
- expect(stats.idle).toBe(3);
- });
-
- it('should throw error for invalid maxConnections', () => {
- expect(() => {
- new ConnectionPool(testDbPath, { maxConnections: 0, connectionTimeout: 5000, idleTimeout: 30000 });
- }).toThrow('maxConnections must be at least 1');
-
- expect(() => {
- new ConnectionPool(testDbPath, { maxConnections: -1, connectionTimeout: 5000, idleTimeout: 30000 });
- }).toThrow('maxConnections must be at least 1');
- });
-
- it('should create pool with in-memory database', async () => {
- pool = await ConnectionPool.create(':memory:', { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
- const stats = pool.getStats();
-
- expect(stats.total).toBe(2);
- expect(stats.idle).toBe(2);
- });
-
- it('should report healthy status after initialization', async () => {
- pool = await ConnectionPool.create(testDbPath, { maxConnections: 3, connectionTimeout: 5000, idleTimeout: 30000 });
- expect(pool.isHealthy()).toBe(true);
- });
- });
-
- describe('Connection Acquisition and Release', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 30000,
- });
- });
-
- it('should acquire and release connection', async () => {
- const statsBefore = pool.getStats();
- expect(statsBefore.idle).toBe(3);
- expect(statsBefore.active).toBe(0);
-
- const db = await pool.acquire();
- expect(db).toBeDefined();
-
- const statsAcquired = pool.getStats();
- expect(statsAcquired.idle).toBe(2);
- expect(statsAcquired.active).toBe(1);
- expect(statsAcquired.totalAcquired).toBe(1);
-
- pool.release(db);
-
- const statsReleased = pool.getStats();
- expect(statsReleased.idle).toBe(3);
- expect(statsReleased.active).toBe(0);
- expect(statsReleased.totalReleased).toBe(1);
- });
-
- it('should allow using acquired connection', async () => {
- const db = await pool.acquire();
-
- // Create table and insert data
- db.exec('CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)');
- db.prepare('INSERT INTO test (name) VALUES (?)').run('Alice');
-
- const result = db.prepare('SELECT * FROM test WHERE name = ?').get('Alice') as any;
- expect(result).toBeDefined();
- expect(result.name).toBe('Alice');
-
- pool.release(db);
- });
-
- it('should handle multiple sequential acquisitions', async () => {
- const db1 = await pool.acquire();
- pool.release(db1);
-
- const db2 = await pool.acquire();
- pool.release(db2);
-
- const db3 = await pool.acquire();
- pool.release(db3);
-
- const stats = pool.getStats();
- expect(stats.totalAcquired).toBe(3);
- expect(stats.totalReleased).toBe(3);
- expect(stats.idle).toBe(3);
- });
-
- it('should reuse connections', async () => {
- const db1 = await pool.acquire();
- pool.release(db1);
-
- const statsBefore = pool.getStats();
- expect(statsBefore.idle).toBe(3);
-
- const db2 = await pool.acquire();
- // Connection is reused (comes from available pool, not created new)
- const statsAfter = pool.getStats();
- expect(statsAfter.idle).toBe(2);
- expect(statsAfter.active).toBe(1);
-
- pool.release(db2);
- });
-
- it('should ignore release of unknown connection', async () => {
- const unknownDb = {} as Database.Database;
- pool.release(unknownDb); // Should not throw
-
- const stats = pool.getStats();
- expect(stats.totalReleased).toBe(0);
- });
- });
-
- describe('Concurrent Access', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 30000,
- });
- });
-
- it('should handle concurrent acquisitions within pool size', async () => {
- const acquisitions = [
- pool.acquire(),
- pool.acquire(),
- pool.acquire(),
- ];
-
- const connections = await Promise.all(acquisitions);
- expect(connections).toHaveLength(3);
-
- const stats = pool.getStats();
- expect(stats.active).toBe(3);
- expect(stats.idle).toBe(0);
-
- // Release all
- connections.forEach(db => pool.release(db));
-
- const statsAfter = pool.getStats();
- expect(statsAfter.idle).toBe(3);
- expect(statsAfter.active).toBe(0);
- });
-
- it('should queue requests when pool is exhausted', async () => {
- // Acquire all connections
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
- const conn3 = await pool.acquire();
-
- const stats1 = pool.getStats();
- expect(stats1.active).toBe(3);
- expect(stats1.idle).toBe(0);
-
- // Try to acquire one more (should queue)
- const pendingPromise = pool.acquire();
-
- // Wait a bit to ensure it's queued
- await new Promise(resolve => setTimeout(resolve, 100));
-
- const stats2 = pool.getStats();
- expect(stats2.waiting).toBe(1);
-
- // Release one connection
- pool.release(conn1);
-
- // The pending request should now be fulfilled
- const conn4 = await pendingPromise;
- expect(conn4).toBeDefined();
-
- const stats3 = pool.getStats();
- expect(stats3.waiting).toBe(0);
- expect(stats3.active).toBe(3); // conn2, conn3, conn4
-
- // Cleanup
- pool.release(conn2);
- pool.release(conn3);
- pool.release(conn4);
- });
-
- it('should handle multiple queued requests', async () => {
- // Acquire all connections
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
- const conn3 = await pool.acquire();
-
- // Queue 3 requests (matching pool size)
- const pending = [
- pool.acquire(),
- pool.acquire(),
- pool.acquire(),
- ];
-
- // Wait for queue to populate
- await new Promise(resolve => setTimeout(resolve, 100));
-
- const stats = pool.getStats();
- expect(stats.waiting).toBe(3);
-
- // Release all connections
- pool.release(conn1);
- pool.release(conn2);
- pool.release(conn3);
-
- // All pending requests should be fulfilled
- const fulfilled = await Promise.all(pending);
- expect(fulfilled).toHaveLength(3);
-
- // Cleanup
- fulfilled.forEach(db => pool.release(db));
- });
- });
-
- describe('Connection Timeout', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 2,
- connectionTimeout: 1000, // 1 second timeout
- idleTimeout: 30000,
- });
- });
-
- it('should timeout when no connection available', async () => {
- // Acquire all connections
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
-
- // Try to acquire one more - should timeout
- await expect(pool.acquire()).rejects.toThrow('Connection acquisition timeout after 1000ms');
-
- const stats = pool.getStats();
- expect(stats.timeoutErrors).toBe(1);
-
- // Cleanup
- pool.release(conn1);
- pool.release(conn2);
- });
-
- it('should not timeout if connection becomes available', async () => {
- // Acquire all connections
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
-
- // Start acquiring (will queue)
- const pendingPromise = pool.acquire();
-
- // Release one connection after 500ms (before timeout)
- setTimeout(() => pool.release(conn1), 500);
-
- // Should succeed (no timeout)
- const conn3 = await pendingPromise;
- expect(conn3).toBeDefined();
-
- const stats = pool.getStats();
- expect(stats.timeoutErrors).toBe(0);
-
- // Cleanup
- pool.release(conn2);
- pool.release(conn3);
- });
- });
-
- describe('Health Checks and Idle Timeout', () => {
- it('should recycle idle connections', async () => {
- vi.useFakeTimers();
-
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 2,
- connectionTimeout: 5000,
- idleTimeout: 5000, // Minimum allowed idle timeout
- healthCheckInterval: 5000, // Minimum allowed health check interval
- });
-
- // Acquire and immediately release a connection
- const conn = await pool.acquire();
- pool.release(conn);
-
- const initialStats = pool.getStats();
- expect(initialStats.totalRecycled).toBe(0);
-
- // Advance past idle timeout + health check interval to trigger recycling
- await vi.advanceTimersByTimeAsync(11000);
-
- const afterStats = pool.getStats();
- expect(afterStats.totalRecycled).toBeGreaterThan(0);
- expect(afterStats.total).toBe(2); // Pool size maintained
- expect(pool.isHealthy()).toBe(true);
-
- vi.useRealTimers();
- });
-
- it('should maintain pool size after recycling', async () => {
- vi.useFakeTimers();
-
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 5000, // Minimum allowed idle timeout
- healthCheckInterval: 5000, // Minimum allowed health check interval
- });
-
- // Use all connections
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
- const conn3 = await pool.acquire();
-
- pool.release(conn1);
- pool.release(conn2);
- pool.release(conn3);
-
- // Advance past idle timeout + health check interval to trigger recycling
- await vi.advanceTimersByTimeAsync(11000);
-
- const stats = pool.getStats();
- expect(stats.total).toBe(3);
- expect(stats.idle).toBe(3);
- expect(pool.isHealthy()).toBe(true);
-
- vi.useRealTimers();
- });
- });
-
- describe('Graceful Shutdown', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 30000,
- });
- });
-
- it('should shutdown cleanly', async () => {
- await pool.shutdown();
-
- const stats = pool.getStats();
- expect(stats.total).toBe(0);
- expect(stats.idle).toBe(0);
- });
-
- it('should reject acquisitions after shutdown', async () => {
- await pool.shutdown();
-
- await expect(pool.acquire()).rejects.toThrow('Pool is shutting down');
- });
-
- it('should reject queued requests on shutdown', async () => {
- // Acquire all connections to exhaust pool
- await pool.acquire();
- await pool.acquire();
- await pool.acquire();
-
- // Queue requests
- const pending1 = pool.acquire();
- const pending2 = pool.acquire();
-
- // Wait for queue
- await new Promise(resolve => setTimeout(resolve, 100));
-
- // Shutdown - should reject queued requests
- const shutdownPromise = pool.shutdown();
-
- await expect(pending1).rejects.toThrow('Pool is shutting down');
- await expect(pending2).rejects.toThrow('Pool is shutting down');
-
- await shutdownPromise;
- });
-
- it('should handle multiple shutdown calls gracefully', async () => {
- await pool.shutdown();
- await pool.shutdown(); // Should not throw
- await pool.shutdown(); // Should not throw
- });
-
- it('should ignore releases after shutdown', async () => {
- const conn = await pool.acquire();
- await pool.shutdown();
-
- // Should not throw
- pool.release(conn);
- });
- });
-
- describe('Statistics Tracking', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 3,
- connectionTimeout: 5000,
- idleTimeout: 30000,
- });
- });
-
- it('should track acquisitions and releases', async () => {
- const conn1 = await pool.acquire();
- const conn2 = await pool.acquire();
-
- let stats = pool.getStats();
- expect(stats.totalAcquired).toBe(2);
- expect(stats.totalReleased).toBe(0);
-
- pool.release(conn1);
-
- stats = pool.getStats();
- expect(stats.totalAcquired).toBe(2);
- expect(stats.totalReleased).toBe(1);
-
- pool.release(conn2);
-
- stats = pool.getStats();
- expect(stats.totalAcquired).toBe(2);
- expect(stats.totalReleased).toBe(2);
- });
-
- it('should track waiting requests', async () => {
- // Acquire all
- const [conn1, conn2] = await Promise.all([
- pool.acquire(),
- pool.acquire(),
- pool.acquire(),
- ]);
-
- // Queue requests
- const pending1 = pool.acquire();
- const pending2 = pool.acquire();
-
- await new Promise(resolve => setTimeout(resolve, 100));
-
- const stats = pool.getStats();
- expect(stats.waiting).toBe(2);
-
- // Release and resolve
- pool.release(conn1);
- pool.release(conn2);
-
- await Promise.all([pending1, pending2]);
-
- const finalStats = pool.getStats();
- expect(finalStats.waiting).toBe(0);
- });
-
- it('should track timeout errors', async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 1,
- connectionTimeout: 500,
- idleTimeout: 30000,
- });
-
- const conn = await pool.acquire();
-
- // Try to acquire - will timeout
- await expect(pool.acquire()).rejects.toThrow();
-
- const stats = pool.getStats();
- expect(stats.timeoutErrors).toBe(1);
-
- pool.release(conn);
- });
- });
-
- describe('Error Handling', () => {
- it('should handle database file errors', async () => {
- const invalidPath = '/invalid/path/to/db.sqlite';
-
- await expect(
- ConnectionPool.create(invalidPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 })
- ).rejects.toThrow();
- });
-
- it('should handle connection close errors gracefully', async () => {
- pool = await ConnectionPool.create(testDbPath, { maxConnections: 2, connectionTimeout: 5000, idleTimeout: 30000 });
-
- const conn = await pool.acquire();
-
- // Force close the connection
- conn.close();
-
- // Release should not throw
- pool.release(conn);
-
- // Shutdown should handle already-closed connection
- await expect(pool.shutdown()).resolves.not.toThrow();
- });
- });
-
- describe('Connection Metadata', () => {
- beforeEach(async () => {
- pool = await ConnectionPool.create(testDbPath, {
- maxConnections: 2,
- connectionTimeout: 5000,
- idleTimeout: 30000,
- });
- });
-
- it('should track connection usage count', async () => {
- const conn = await pool.acquire();
- pool.release(conn);
-
- const conn2 = await pool.acquire();
- // Connection is reused from pool
- pool.release(conn2);
-
- const stats = pool.getStats();
- expect(stats.totalAcquired).toBe(2); // Connection acquired twice
- expect(stats.totalReleased).toBe(2); // And released twice
- });
- });
-});
diff --git a/tests/unit/db/SimpleDatabaseFactory.pool.test.ts b/tests/unit/db/SimpleDatabaseFactory.pool.test.ts
deleted file mode 100644
index c7458f41..00000000
--- a/tests/unit/db/SimpleDatabaseFactory.pool.test.ts
+++ /dev/null
@@ -1,326 +0,0 @@
-/**
- * SimpleDatabaseFactory Connection Pool Integration Tests
- *
- * Tests the integration of ConnectionPool with SimpleDatabaseFactory.
- * Validates pool management, configuration, and backward compatibility.
- */
-
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import { SimpleDatabaseFactory } from '../../../src/config/simple-config.js';
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-
-describe('SimpleDatabaseFactory - Connection Pool Integration', () => {
- let testDbPath: string;
-
- beforeEach(() => {
- // Create unique test database for each test
- const timestamp = Date.now();
- const random = Math.floor(Math.random() * 10000);
- testDbPath = path.join(os.tmpdir(), `test-factory-pool-${timestamp}-${random}.db`);
- });
-
- afterEach(async () => {
- // Cleanup factory
- await SimpleDatabaseFactory.closeAll();
-
- // Remove test database
- if (fs.existsSync(testDbPath)) {
- fs.unlinkSync(testDbPath);
- }
-
- // Clean up WAL and SHM files
- [testDbPath + '-wal', testDbPath + '-shm'].forEach(file => {
- if (fs.existsSync(file)) {
- fs.unlinkSync(file);
- }
- });
-
- // Clean environment variables
- delete process.env.DB_POOL_SIZE;
- delete process.env.DB_POOL_TIMEOUT;
- delete process.env.DB_POOL_IDLE_TIMEOUT;
- });
-
- describe('Pool Management', () => {
- it('should create connection pool on first getPool() call', async () => {
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
- expect(pool).toBeDefined();
-
- const stats = pool.getStats();
- expect(stats.total).toBe(5); // Default pool size
- });
-
- it('should reuse existing pool', async () => {
- const pool1 = await SimpleDatabaseFactory.getPool(testDbPath);
- const pool2 = await SimpleDatabaseFactory.getPool(testDbPath);
-
- expect(pool1).toBe(pool2); // Same instance
- });
-
- it('should create separate pools for different paths', async () => {
- const pool1 = await SimpleDatabaseFactory.getPool(testDbPath);
- const pool2 = await SimpleDatabaseFactory.getPool(':memory:');
-
- expect(pool1).not.toBe(pool2);
- });
-
- it('should respect DB_POOL_SIZE environment variable', async () => {
- process.env.DB_POOL_SIZE = '10';
-
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
- const stats = pool.getStats();
-
- expect(stats.total).toBe(10);
- });
-
- it('should respect DB_POOL_TIMEOUT environment variable', async () => {
- process.env.DB_POOL_TIMEOUT = '3000';
-
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
- expect(pool).toBeDefined();
- // Timeout is internal, validated indirectly through behavior
- });
-
- it('should respect DB_POOL_IDLE_TIMEOUT environment variable', async () => {
- process.env.DB_POOL_IDLE_TIMEOUT = '60000';
-
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
- expect(pool).toBeDefined();
- // Idle timeout is internal, validated indirectly through behavior
- });
-
- it('should use default values for invalid environment variables', async () => {
- process.env.DB_POOL_SIZE = 'invalid';
- process.env.DB_POOL_TIMEOUT = 'abc';
-
- const pool = await SimpleDatabaseFactory.getPool(testDbPath);
- const stats = pool.getStats();
-
- // Invalid values should fallback to defaults
- expect(stats.total).toBe(5); // Default maxConnections
- });
- });
-
- describe('Pooled Connection Acquisition', () => {
- it('should acquire pooled connection', async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
- expect(db).toBeDefined();
- expect(db.open).toBe(true);
-
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
- });
-
- it('should allow using pooled connection', async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- // Create table and query
- db.exec('CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)');
- db.prepare('INSERT INTO test (name) VALUES (?)').run('Alice');
-
- const result = db.prepare('SELECT * FROM test WHERE name = ?').get('Alice') as any;
- expect(result).toBeDefined();
- expect(result.name).toBe('Alice');
-
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
- });
-
- it('should handle multiple pooled connections', async () => {
- const db1 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
- const db2 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
- const db3 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- expect(db1).toBeDefined();
- expect(db2).toBeDefined();
- expect(db3).toBeDefined();
-
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats?.active).toBe(3);
- expect(stats?.idle).toBe(2); // 5 total - 3 active
-
- SimpleDatabaseFactory.releasePooledConnection(db1, testDbPath);
- SimpleDatabaseFactory.releasePooledConnection(db2, testDbPath);
- SimpleDatabaseFactory.releasePooledConnection(db3, testDbPath);
-
- const statsAfter = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(statsAfter?.active).toBe(0);
- expect(statsAfter?.idle).toBe(5);
- });
-
- it('should handle release of unknown connection gracefully', async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- // Try to release to non-existent pool
- SimpleDatabaseFactory.releasePooledConnection(db, '/non/existent/path.db');
-
- // Should not throw, just log error
- expect(true).toBe(true);
-
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
- });
- });
-
- describe('Pool Statistics', () => {
- it('should return pool statistics', async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats).toBeDefined();
- expect(stats?.total).toBe(5);
- expect(stats?.active).toBe(1);
- expect(stats?.idle).toBe(4);
- expect(stats?.waiting).toBe(0);
-
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
- });
-
- it('should return null for non-existent pool', () => {
- const stats = SimpleDatabaseFactory.getPoolStats('/non/existent/path.db');
- expect(stats).toBeNull();
- });
-
- it('should track total acquired and released', async () => {
- const db1 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
- const db2 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- SimpleDatabaseFactory.releasePooledConnection(db1, testDbPath);
-
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats?.totalAcquired).toBe(2);
- expect(stats?.totalReleased).toBe(1);
-
- SimpleDatabaseFactory.releasePooledConnection(db2, testDbPath);
-
- const statsAfter = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(statsAfter?.totalAcquired).toBe(2);
- expect(statsAfter?.totalReleased).toBe(2);
- });
- });
-
- describe('Backward Compatibility', () => {
- it('should still support singleton getInstance()', () => {
- const db1 = SimpleDatabaseFactory.getInstance(testDbPath);
- const db2 = SimpleDatabaseFactory.getInstance(testDbPath);
-
- expect(db1).toBe(db2); // Same singleton instance
- expect(db1.open).toBe(true);
- });
-
- it('should support singleton and pooled connections simultaneously', async () => {
- const singleton = SimpleDatabaseFactory.getInstance(testDbPath);
- const pooled = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- expect(singleton).toBeDefined();
- expect(pooled).toBeDefined();
- expect(singleton).not.toBe(pooled); // Different connections
-
- SimpleDatabaseFactory.releasePooledConnection(pooled, testDbPath);
- });
-
- it('should support createTestDatabase()', () => {
- const testDb = SimpleDatabaseFactory.createTestDatabase();
- expect(testDb).toBeDefined();
- expect(testDb.open).toBe(true);
-
- testDb.exec('CREATE TABLE test (id INTEGER PRIMARY KEY)');
- const result = testDb.prepare("SELECT * FROM sqlite_master WHERE type='table'").all();
- expect(result.length).toBeGreaterThan(0);
-
- testDb.close();
- });
- });
-
- describe('Cleanup', () => {
- it('should close singleton and pool on close()', async () => {
- const singleton = SimpleDatabaseFactory.getInstance(testDbPath);
- const pooled = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- SimpleDatabaseFactory.releasePooledConnection(pooled, testDbPath);
-
- await SimpleDatabaseFactory.close(testDbPath);
-
- // Singleton should be closed
- expect(singleton.open).toBe(false);
-
- // Pool should be shutdown
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats).toBeNull();
- });
-
- it('should close all singletons and pools on closeAll()', async () => {
- const db1 = SimpleDatabaseFactory.getInstance(testDbPath);
- const pooled1 = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
-
- const testDbPath2 = path.join(os.tmpdir(), `test-factory-pool-${Date.now()}-2.db`);
- const db2 = SimpleDatabaseFactory.getInstance(testDbPath2);
- const pooled2 = await SimpleDatabaseFactory.getPooledConnection(testDbPath2);
-
- SimpleDatabaseFactory.releasePooledConnection(pooled1, testDbPath);
- SimpleDatabaseFactory.releasePooledConnection(pooled2, testDbPath2);
-
- await SimpleDatabaseFactory.closeAll();
-
- // All singletons closed
- expect(db1.open).toBe(false);
- expect(db2.open).toBe(false);
-
- // All pools shutdown
- expect(SimpleDatabaseFactory.getPoolStats(testDbPath)).toBeNull();
- expect(SimpleDatabaseFactory.getPoolStats(testDbPath2)).toBeNull();
-
- // Cleanup second test file
- if (fs.existsSync(testDbPath2)) {
- fs.unlinkSync(testDbPath2);
- }
- [testDbPath2 + '-wal', testDbPath2 + '-shm'].forEach(file => {
- if (fs.existsSync(file)) {
- fs.unlinkSync(file);
- }
- });
- });
-
- it('should handle close() on non-existent path gracefully', async () => {
- await SimpleDatabaseFactory.close('/non/existent/path.db');
- // Should not throw
- expect(true).toBe(true);
- });
-
- it('should allow recreation after close()', async () => {
- const pool1 = await SimpleDatabaseFactory.getPool(testDbPath);
- await SimpleDatabaseFactory.close(testDbPath);
-
- const pool2 = await SimpleDatabaseFactory.getPool(testDbPath);
- expect(pool2).not.toBe(pool1); // New instance
- expect(pool2.getStats().total).toBe(5);
- });
- });
-
- describe('Concurrent Stress Test', () => {
- it('should handle concurrent pooled connections', async () => {
- const concurrentRequests = 20;
- const results: Promise[] = [];
-
- for (let i = 0; i < concurrentRequests; i++) {
- results.push((async () => {
- const db = await SimpleDatabaseFactory.getPooledConnection(testDbPath);
- try {
- // Simulate work
- db.prepare('SELECT 1 + 1 AS result').get();
- await new Promise(resolve => setTimeout(resolve, Math.random() * 100));
- } finally {
- SimpleDatabaseFactory.releasePooledConnection(db, testDbPath);
- }
- })());
- }
-
- await Promise.all(results);
-
- const stats = SimpleDatabaseFactory.getPoolStats(testDbPath);
- expect(stats?.totalAcquired).toBe(concurrentRequests);
- expect(stats?.totalReleased).toBe(concurrentRequests);
- expect(stats?.active).toBe(0);
- expect(stats?.idle).toBe(5);
- });
- });
-});
diff --git a/tests/unit/hooks/plan-db.test.ts b/tests/unit/hooks/plan-db.test.ts
deleted file mode 100644
index cb4774db..00000000
--- a/tests/unit/hooks/plan-db.test.ts
+++ /dev/null
@@ -1,203 +0,0 @@
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-import { execFileSync } from 'child_process';
-import {
- queryActivePlans,
- addObservation,
- updateEntityMetadata,
- updateEntityTag,
- createRelation,
- sqliteBatchEntity,
- sqliteQueryJSON,
-} from '../../../scripts/hooks/hook-utils.js';
-
-describe('Plan DB Functions', () => {
- let dbPath;
- let testDir;
-
- beforeEach(() => {
- testDir = path.join(os.tmpdir(), `plan-db-test-${Date.now()}`);
- fs.mkdirSync(testDir, { recursive: true });
- dbPath = path.join(testDir, 'test.db');
-
- // Create schema matching MeMesh KG
- const schema = `
- CREATE TABLE entities (id INTEGER PRIMARY KEY, name TEXT UNIQUE, type TEXT, created_at TEXT, metadata TEXT DEFAULT '{}');
- CREATE TABLE observations (id INTEGER PRIMARY KEY, entity_id INTEGER, content TEXT, created_at TEXT);
- CREATE TABLE tags (id INTEGER PRIMARY KEY, entity_id INTEGER, tag TEXT);
- CREATE TABLE relations (id INTEGER PRIMARY KEY, from_entity_id INTEGER, to_entity_id INTEGER, relation_type TEXT, metadata TEXT DEFAULT '{}', created_at TEXT, UNIQUE(from_entity_id, to_entity_id, relation_type));
- `;
- execFileSync('sqlite3', [dbPath], { input: schema, encoding: 'utf-8' });
- });
-
- afterEach(() => {
- if (fs.existsSync(testDir)) {
- fs.rmSync(testDir, { recursive: true, force: true });
- }
- });
-
- describe('queryActivePlans', () => {
- it('should return active plan entities', () => {
- const metadata = JSON.stringify({ totalSteps: 3, completed: 1, status: 'active', stepsDetail: [] });
- sqliteBatchEntity(dbPath,
- { name: 'Plan: test-plan', type: 'workflow_checkpoint', metadata },
- ['Step 1: Do something'], ['plan', 'active', 'scope:project']
- );
-
- const plans = queryActivePlans(dbPath);
- expect(plans).toHaveLength(1);
- expect(plans[0].name).toBe('Plan: test-plan');
- expect(plans[0].metadata.totalSteps).toBe(3);
- });
-
- it('should not return completed plans', () => {
- const metadata = JSON.stringify({ totalSteps: 2, completed: 2, status: 'completed' });
- sqliteBatchEntity(dbPath,
- { name: 'Plan: done-plan', type: 'workflow_checkpoint', metadata },
- ['Step 1'], ['plan', 'completed']
- );
-
- const plans = queryActivePlans(dbPath);
- expect(plans).toHaveLength(0);
- });
-
- it('should return empty array when DB does not exist', () => {
- expect(queryActivePlans('/nonexistent/path.db')).toEqual([]);
- });
- });
-
- describe('addObservation', () => {
- it('should add observation to existing entity', () => {
- sqliteBatchEntity(dbPath,
- { name: 'Plan: obs-test', type: 'workflow_checkpoint' },
- ['Initial observation'], ['plan']
- );
-
- const result = addObservation(dbPath, 'Plan: obs-test', 'New observation');
- expect(result).toBe(true);
-
- // Verify observation was added
- const obs = execFileSync('sqlite3', ['-json', dbPath,
- "SELECT content FROM observations WHERE entity_id = (SELECT id FROM entities WHERE name = 'Plan: obs-test') ORDER BY id"],
- { encoding: 'utf-8' });
- const rows = JSON.parse(obs);
- expect(rows).toHaveLength(2);
- expect(rows[1].content).toBe('New observation');
- });
- });
-
- describe('updateEntityMetadata', () => {
- it('should update entity metadata JSON', () => {
- sqliteBatchEntity(dbPath,
- { name: 'Plan: meta-test', type: 'workflow_checkpoint', metadata: JSON.stringify({ completed: 0 }) },
- [], ['plan']
- );
-
- const result = updateEntityMetadata(dbPath, 'Plan: meta-test', { completed: 1, status: 'active' });
- expect(result).toBe(true);
-
- // Verify metadata updated
- const row = execFileSync('sqlite3', [dbPath,
- "SELECT metadata FROM entities WHERE name = 'Plan: meta-test'"],
- { encoding: 'utf-8' }).trim();
- const meta = JSON.parse(row);
- expect(meta.completed).toBe(1);
- });
- });
-
- describe('updateEntityTag', () => {
- it('should swap tag from active to completed', () => {
- sqliteBatchEntity(dbPath,
- { name: 'Plan: tag-test', type: 'workflow_checkpoint' },
- [], ['plan', 'active']
- );
-
- const result = updateEntityTag(dbPath, 'Plan: tag-test', 'active', 'completed');
- expect(result).toBe(true);
-
- // Verify tag changed
- const tags = execFileSync('sqlite3', [dbPath,
- "SELECT tag FROM tags WHERE entity_id = (SELECT id FROM entities WHERE name = 'Plan: tag-test')"],
- { encoding: 'utf-8' }).trim().split('\n');
- expect(tags).toContain('completed');
- expect(tags).not.toContain('active');
- });
- });
-
- describe('createRelation', () => {
- it('should create relation between two entities', () => {
- sqliteBatchEntity(dbPath,
- { name: 'Commit abc: fix auth', type: 'commit' }, [], ['commit']);
- sqliteBatchEntity(dbPath,
- { name: 'Plan: auth-plan', type: 'workflow_checkpoint' }, [], ['plan']);
-
- const result = createRelation(dbPath, 'Commit abc: fix auth', 'Plan: auth-plan', 'depends_on');
- expect(result).toBe(true);
-
- // Verify relation created
- const rel = execFileSync('sqlite3', ['-json', dbPath,
- 'SELECT relation_type FROM relations'], { encoding: 'utf-8' });
- const rows = JSON.parse(rel);
- expect(rows).toHaveLength(1);
- expect(rows[0].relation_type).toBe('depends_on');
- });
-
- it('should not fail on duplicate relation', () => {
- sqliteBatchEntity(dbPath, { name: 'E1', type: 'commit' }, [], []);
- sqliteBatchEntity(dbPath, { name: 'E2', type: 'workflow_checkpoint' }, [], []);
-
- createRelation(dbPath, 'E1', 'E2', 'depends_on');
- const result = createRelation(dbPath, 'E1', 'E2', 'depends_on');
- expect(result).toBe(true); // INSERT OR IGNORE
- });
- });
-
- describe('sqliteBatchEntity duplicate handling', () => {
- it('should return null when entity with same name already exists', () => {
- const first = sqliteBatchEntity(dbPath,
- { name: 'Plan: dup-test', type: 'workflow_checkpoint', metadata: '{"v":1}' },
- ['obs1'], ['plan']
- );
- expect(first).not.toBeNull();
-
- // Second insert with same name should fail (UNIQUE constraint)
- const second = sqliteBatchEntity(dbPath,
- { name: 'Plan: dup-test', type: 'workflow_checkpoint', metadata: '{"v":2}' },
- ['obs2'], ['plan']
- );
- expect(second).toBeNull();
-
- // Metadata should still be the original value
- const rows = JSON.parse(execFileSync('sqlite3', ['-json', dbPath,
- "SELECT metadata FROM entities WHERE name = 'Plan: dup-test'"],
- { encoding: 'utf-8' }));
- expect(JSON.parse(rows[0].metadata).v).toBe(1);
- });
- });
-
- describe('sqliteQueryJSON', () => {
- it('should return empty array for query with no matching rows', () => {
- const result = sqliteQueryJSON(dbPath,
- 'SELECT * FROM entities WHERE name = ?', ['nonexistent']);
- expect(result).toEqual([]);
- });
-
- it('should return null for invalid database path', () => {
- const result = sqliteQueryJSON('/nonexistent/path.db',
- 'SELECT * FROM entities');
- expect(result).toBeNull();
- });
-
- it('should return parsed rows for valid query', () => {
- sqliteBatchEntity(dbPath,
- { name: 'Test Entity', type: 'test' }, [], []);
-
- const result = sqliteQueryJSON(dbPath,
- 'SELECT name, type FROM entities WHERE name = ?', ['Test Entity']);
- expect(result).toHaveLength(1);
- expect(result[0].name).toBe('Test Entity');
- });
- });
-});
diff --git a/tests/unit/hooks/plan-e2e.test.ts b/tests/unit/hooks/plan-e2e.test.ts
deleted file mode 100644
index 84025647..00000000
--- a/tests/unit/hooks/plan-e2e.test.ts
+++ /dev/null
@@ -1,133 +0,0 @@
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import fs from 'fs';
-import path from 'path';
-import os from 'os';
-import { execFileSync } from 'child_process';
-import {
- sqliteBatchEntity,
- parsePlanSteps,
- derivePlanName,
- isPlanFile,
- matchCommitToStep,
- queryActivePlans,
- addObservation,
- updateEntityMetadata,
- updateEntityTag,
- createRelation,
- renderTimeline,
- renderTimelineCompact,
-} from '../../../scripts/hooks/hook-utils.js';
-
-describe('Plan-Aware Hooks E2E Flow', () => {
- let dbPath;
- let testDir;
-
- beforeEach(() => {
- testDir = path.join(os.tmpdir(), `plan-e2e-${Date.now()}`);
- fs.mkdirSync(testDir, { recursive: true });
- dbPath = path.join(testDir, 'test.db');
- const schema = `
- CREATE TABLE entities (id INTEGER PRIMARY KEY, name TEXT UNIQUE, type TEXT, created_at TEXT, metadata TEXT DEFAULT '{}');
- CREATE TABLE observations (id INTEGER PRIMARY KEY, entity_id INTEGER, content TEXT, created_at TEXT);
- CREATE TABLE tags (id INTEGER PRIMARY KEY, entity_id INTEGER, tag TEXT);
- CREATE TABLE relations (id INTEGER PRIMARY KEY, from_entity_id INTEGER, to_entity_id INTEGER, relation_type TEXT, metadata TEXT DEFAULT '{}', created_at TEXT, UNIQUE(from_entity_id, to_entity_id, relation_type));
- `;
- execFileSync('sqlite3', [dbPath], { input: schema, encoding: 'utf-8' });
- });
-
- afterEach(() => {
- if (fs.existsSync(testDir)) {
- fs.rmSync(testDir, { recursive: true, force: true });
- }
- });
-
- it('should complete full plan lifecycle: create → progress → complete', () => {
- // 1. Simulate plan file creation
- const planContent = `# Auth System Plan
-## Tasks
-- [ ] Step 1: Set up auth middleware
-- [ ] Step 2: Add JWT validation
-- [ ] Step 3: Write tests
-`;
- const steps = parsePlanSteps(planContent);
- expect(steps).toHaveLength(3);
-
- const filePath = 'docs/plans/2026-02-25-auth-system-design.md';
- expect(isPlanFile(filePath)).toBe(true);
-
- const planName = derivePlanName(filePath);
- expect(planName).toBe('auth-system-design');
-
- // 2. Create plan entity in KG
- const entityName = `Plan: ${planName}`;
- const metadata = {
- sourceFile: filePath,
- totalSteps: steps.length,
- completed: 0,
- status: 'active',
- stepsDetail: steps,
- };
-
- sqliteBatchEntity(dbPath,
- { name: entityName, type: 'workflow_checkpoint', metadata: JSON.stringify(metadata) },
- steps.map(s => `Step ${s.number}: ${s.description}`),
- ['plan', 'active', `plan:${planName}`, 'scope:project']
- );
-
- // 3. Verify plan is active
- let activePlans = queryActivePlans(dbPath);
- expect(activePlans).toHaveLength(1);
- expect(activePlans[0].metadata.completed).toBe(0);
-
- // 4. Simulate first commit matching step 1
- const match1 = matchCommitToStep(
- { subject: 'feat(auth): Set up auth middleware', filesChanged: ['src/auth/middleware.ts'] },
- steps
- );
- expect(match1).not.toBeNull();
- expect(match1.step.number).toBe(1);
- expect(match1.confidence).toBeGreaterThan(0.3);
-
- // Update plan
- const updatedSteps1 = steps.map(s =>
- s.number === 1 ? { ...s, completed: true, commitHash: 'abc1234' } : s
- );
- updateEntityMetadata(dbPath, entityName, { ...metadata, completed: 1, stepsDetail: updatedSteps1 });
- addObservation(dbPath, entityName, 'Step 1 completed by abc1234');
-
- // 5. Verify timeline renders correctly
- activePlans = queryActivePlans(dbPath);
- const timeline = renderTimeline({
- ...activePlans[0],
- metadata: { ...activePlans[0].metadata, completed: 1, stepsDetail: updatedSteps1 },
- });
- expect(timeline).toContain('33%');
-
- // 6. Create commit entity and relation
- sqliteBatchEntity(dbPath,
- { name: 'Commit abc1234: feat(auth): Set up auth middleware', type: 'commit' },
- [], ['commit']
- );
- createRelation(dbPath, 'Commit abc1234: feat(auth): Set up auth middleware', entityName, 'depends_on');
-
- // 7. Complete remaining steps
- updateEntityMetadata(dbPath, entityName, {
- ...metadata,
- completed: 3,
- status: 'completed',
- stepsDetail: steps.map(s => ({ ...s, completed: true })),
- });
- updateEntityTag(dbPath, entityName, 'active', 'completed');
-
- // 8. Verify plan no longer shows as active
- activePlans = queryActivePlans(dbPath);
- expect(activePlans).toHaveLength(0);
-
- // 9. Compact timeline shows complete
- const compact = renderTimelineCompact({
- name: entityName,
- metadata: { totalSteps: 3, completed: 3, stepsDetail: steps.map(s => ({ ...s, completed: true })) },
- });
- expect(compact).toContain('Complete');
- });
-});
diff --git a/tests/unit/hooks/plan-utils.test.ts b/tests/unit/hooks/plan-utils.test.ts
deleted file mode 100644
index a9e38996..00000000
--- a/tests/unit/hooks/plan-utils.test.ts
+++ /dev/null
@@ -1,439 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import {
- tokenize,
- extractModuleHints,
- derivePlanName,
- parsePlanSteps,
- matchCommitToStep,
- renderTimeline,
- renderTimelineCompact,
- isPlanFile,
-} from '../../../scripts/hooks/hook-utils.js';
-
-describe('tokenize', () => {
- it('should lowercase and split text into words', () => {
- // 'set' and 'up' are filtered (stop words / too short)
- expect(tokenize('Set Up Auth Middleware')).toEqual(['auth', 'middleware']);
- });
-
- it('should remove punctuation', () => {
- expect(tokenize('feat(auth): add validation!')).toEqual(['feat', 'auth', 'add', 'validation']);
- });
-
- it('should filter words shorter than 3 chars', () => {
- expect(tokenize('a is to the big dog')).toEqual(['big', 'dog']);
- });
-
- it('should filter common stop words', () => {
- expect(tokenize('add the new feature for users')).toEqual(['add', 'new', 'feature', 'users']);
- });
-
- it('should return empty array for empty input', () => {
- expect(tokenize('')).toEqual([]);
- });
-});
-
-describe('extractModuleHints', () => {
- it('should extract meaningful words from step description', () => {
- const hints = extractModuleHints('Set up auth middleware');
- expect(hints).toContain('auth');
- expect(hints).toContain('middleware');
- });
-
- it('should handle technical terms', () => {
- const hints = extractModuleHints('Add JWT validation logic');
- expect(hints).toContain('jwt');
- expect(hints).toContain('validation');
- });
-});
-
-describe('derivePlanName', () => {
- it('should extract name from docs/plans/ path', () => {
- expect(derivePlanName('docs/plans/2026-02-25-auth-system-design.md'))
- .toBe('auth-system-design');
- });
-
- it('should extract name from -design.md suffix', () => {
- expect(derivePlanName('docs/auth-system-design.md'))
- .toBe('auth-system-design');
- });
-
- it('should extract name from -plan.md suffix', () => {
- expect(derivePlanName('my-feature-plan.md'))
- .toBe('my-feature-plan');
- });
-
- it('should handle path with no date prefix', () => {
- expect(derivePlanName('docs/plans/auth-design.md'))
- .toBe('auth-design');
- });
-});
-
-describe('parsePlanSteps', () => {
- it('should parse checkbox format steps', () => {
- const content = `# Plan
-## Tasks
-- [ ] Step 1: Set up auth middleware
-- [ ] Step 2: Add JWT validation
-- [ ] Step 3: Write tests
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(3);
- expect(steps[0]).toEqual({ number: 1, description: 'Set up auth middleware', completed: false });
- expect(steps[1]).toEqual({ number: 2, description: 'Add JWT validation', completed: false });
- expect(steps[2]).toEqual({ number: 3, description: 'Write tests', completed: false });
- });
-
- it('should parse checkbox format without "Step N:" prefix', () => {
- const content = `- [ ] Set up database
-- [ ] Create API endpoints
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(2);
- expect(steps[0]).toEqual({ number: 1, description: 'Set up database', completed: false });
- expect(steps[1]).toEqual({ number: 2, description: 'Create API endpoints', completed: false });
- });
-
- it('should parse heading format steps', () => {
- const content = `# Plan
-## Step 1: Set up auth middleware
-Some details here.
-## Step 2: Add JWT validation
-More details.
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(2);
- expect(steps[0]).toEqual({ number: 1, description: 'Set up auth middleware', completed: false });
- });
-
- it('should parse numbered heading format', () => {
- const content = `### 1. Set up auth middleware
-### 2. Add JWT validation
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(2);
- expect(steps[0]).toEqual({ number: 1, description: 'Set up auth middleware', completed: false });
- });
-
- it('should detect already-checked items as completed', () => {
- const content = `- [x] Step 1: Already done
-- [ ] Step 2: Not done yet
-`;
- const steps = parsePlanSteps(content);
- expect(steps[0].completed).toBe(true);
- expect(steps[1].completed).toBe(false);
- });
-
- it('should return empty array for content with no steps', () => {
- const content = `# Just a regular document
-Some paragraph text here.
-`;
- expect(parsePlanSteps(content)).toEqual([]);
- });
-
- it('should handle Task N: prefix format', () => {
- const content = `### Task 1: Set up database
-### Task 2: Create API
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(2);
- expect(steps[0].description).toBe('Set up database');
- });
-
- it('should skip steps inside code fences', () => {
- const content = `# Implementation Plan
-## Tasks
-- [ ] Task 1: Set up database
-- [ ] Task 2: Create API
-
-### Task 1: Set up database
-
-**Step 1: Write the failing test**
-
-\`\`\`markdown
-- [ ] Set up auth middleware
-- [ ] Add JWT validation
-- [ ] Write tests
-\`\`\`
-
-**Step 2: Implement**
-
-\`\`\`javascript
-// example code
-const steps = parsePlanSteps(content);
-\`\`\`
-
-### Task 2: Create API
-`;
- const steps = parsePlanSteps(content);
- // Should only find 2 checkbox steps + 2 heading steps = 4
- // NOT the 3 example checkboxes inside the code fence
- expect(steps).toHaveLength(4);
- expect(steps[0].description).toBe('Set up database');
- expect(steps[1].description).toBe('Create API');
- });
-
- it('should handle nested code fences and indented fences', () => {
- const content = `- [ ] Real step one
-\`\`\`
-- [ ] Fake step inside fence
-\`\`\`
-- [ ] Real step two
-`;
- const steps = parsePlanSteps(content);
- expect(steps).toHaveLength(2);
- expect(steps[0].description).toBe('Real step one');
- expect(steps[1].description).toBe('Real step two');
- });
-});
-
-describe('matchCommitToStep', () => {
- const planSteps = [
- { number: 1, description: 'Set up auth middleware', completed: false },
- { number: 2, description: 'Add JWT validation', completed: false },
- { number: 3, description: 'Write integration tests', completed: false },
- ];
-
- it('should match commit to best matching step with confidence', () => {
- const commitInfo = {
- subject: 'feat(auth): Set up auth middleware',
- filesChanged: ['src/auth/middleware.ts'],
- };
- const match = matchCommitToStep(commitInfo, planSteps);
- expect(match).not.toBeNull();
- expect(match.step.number).toBe(1);
- expect(match.confidence).toBeGreaterThan(0.3);
- });
-
- it('should return high confidence for strong keyword + file match', () => {
- const commitInfo = {
- subject: 'feat(auth): Set up auth middleware',
- filesChanged: ['src/auth/middleware.ts'],
- };
- const match = matchCommitToStep(commitInfo, planSteps);
- expect(match.confidence).toBeGreaterThanOrEqual(0.6);
- });
-
- it('should get bonus score from file path match', () => {
- const commitInfo = {
- subject: 'feat: add validation logic',
- filesChanged: ['src/auth/jwt-validator.ts'],
- };
- const match = matchCommitToStep(commitInfo, planSteps);
- expect(match).not.toBeNull();
- expect(match.step.number).toBe(2); // jwt matches
- });
-
- it('should skip already completed steps', () => {
- const stepsWithCompleted = [
- { number: 1, description: 'Set up auth middleware', completed: true },
- { number: 2, description: 'Set up auth routes', completed: false },
- ];
- const commitInfo = {
- subject: 'feat(auth): Set up auth routes',
- filesChanged: ['src/auth/routes.ts'],
- };
- const match = matchCommitToStep(commitInfo, stepsWithCompleted);
- expect(match).not.toBeNull();
- expect(match.step.number).toBe(2);
- });
-
- it('should return null when no step matches above threshold', () => {
- const commitInfo = {
- subject: 'docs: update README',
- filesChanged: ['README.md'],
- };
- const match = matchCommitToStep(commitInfo, planSteps);
- expect(match).toBeNull();
- });
-
- it('should return null for empty steps array', () => {
- const commitInfo = { subject: 'feat: something', filesChanged: [] };
- expect(matchCommitToStep(commitInfo, [])).toBeNull();
- });
-});
-
-describe('renderTimeline', () => {
- const makePlan = (completed, total) => ({
- name: 'Plan: auth-system',
- metadata: {
- totalSteps: total,
- completed,
- stepsDetail: Array.from({ length: total }, (_, i) => ({
- number: i + 1,
- description: `Step ${i + 1} description`,
- completed: i < completed,
- commitHash: i < completed ? `abc${i}` : undefined,
- })),
- },
- _lastCommit: 'def456',
- });
-
- it('should render timeline with correct node symbols', () => {
- const output = renderTimeline(makePlan(2, 4));
- expect(output).toContain('\u25cf'); // ● completed nodes
- expect(output).toContain('\u25cb'); // ○ pending nodes
- expect(output).toContain('50%');
- });
-
- it('should highlight a specific step', () => {
- const output = renderTimeline(makePlan(2, 4), 2);
- expect(output).toContain('def456'); // last commit reference
- });
-
- it('should show next step', () => {
- const output = renderTimeline(makePlan(2, 4));
- expect(output).toContain('Next');
- expect(output).toContain('Step 3 description');
- });
-
- it('should show completion message when all done', () => {
- const output = renderTimeline(makePlan(4, 4));
- expect(output).toContain('complete');
- });
-
- it('should show (?) marker for low confidence match', () => {
- const plan = makePlan(2, 4);
- plan._matchConfidence = 0.4;
- const output = renderTimeline(plan, 2);
- expect(output).toContain('(?)');
- });
-
- it('should not show (?) for high confidence match', () => {
- const plan = makePlan(2, 4);
- plan._matchConfidence = 0.8;
- const output = renderTimeline(plan, 2);
- expect(output).not.toContain('(?)');
- });
-
- it('should return empty string for missing stepsDetail', () => {
- const plan = { name: 'Plan: broken', metadata: {} };
- expect(renderTimeline(plan)).toBe('');
- });
-
- it('should return empty string for totalSteps = 0', () => {
- const plan = { name: 'Plan: empty', metadata: { totalSteps: 0, completed: 0, stepsDetail: [] } };
- expect(renderTimeline(plan)).toBe('');
- });
-
- it('should show 0% when completed is undefined (not NaN%)', () => {
- const plan = {
- name: 'Plan: no-completed',
- metadata: {
- totalSteps: 3,
- // completed is intentionally missing
- stepsDetail: [
- { number: 1, description: 'Step 1', completed: false },
- { number: 2, description: 'Step 2', completed: false },
- { number: 3, description: 'Step 3', completed: false },
- ],
- },
- };
- const output = renderTimeline(plan);
- expect(output).toContain('0%');
- expect(output).not.toContain('NaN');
- });
-
- it('should show ◉ for highlighted step even when completed', () => {
- // Simulates the real post-commit flow: step 2 was JUST completed
- const plan = makePlan(2, 4);
- const output = renderTimeline(plan, 2);
- // The highlighted step should show ◉, not ● like other completed steps
- const nodeSection = output.split('\n').find(l => l.includes('\u25cf') || l.includes('\u25c9'));
- // Count ◉ symbols — should be 2 (highlighted step 2 + next step 3)
- const highlightCount = (nodeSection.match(/\u25c9/g) || []).length;
- expect(highlightCount).toBe(2);
- });
-});
-
-describe('renderTimelineCompact', () => {
- const makePlan = (completed, total) => ({
- name: 'Plan: auth-system',
- metadata: {
- totalSteps: total,
- completed,
- stepsDetail: Array.from({ length: total }, (_, i) => ({
- number: i + 1,
- description: `Step ${i + 1} desc`,
- completed: i < completed,
- })),
- },
- });
-
- it('should render compact timeline with percentage', () => {
- const output = renderTimelineCompact(makePlan(2, 4));
- expect(output).toContain('50%');
- expect(output).toContain('auth-system');
- });
-
- it('should show next step in compact view', () => {
- const output = renderTimelineCompact(makePlan(1, 3));
- expect(output).toContain('Step 2 desc');
- });
-
- it('should show complete in compact view when done', () => {
- const output = renderTimelineCompact(makePlan(3, 3));
- expect(output).toContain('Complete');
- });
-
- it('should return empty string for missing stepsDetail', () => {
- const plan = { name: 'Plan: broken', metadata: {} };
- expect(renderTimelineCompact(plan)).toBe('');
- });
-
- it('should return empty string for totalSteps = 0', () => {
- const plan = { name: 'Plan: empty', metadata: { totalSteps: 0, completed: 0, stepsDetail: [] } };
- expect(renderTimelineCompact(plan)).toBe('');
- });
-
- it('should show 0% when completed is undefined (not NaN%)', () => {
- const plan = {
- name: 'Plan: no-completed',
- metadata: {
- totalSteps: 2,
- // completed is intentionally missing
- stepsDetail: [
- { number: 1, description: 'Step 1', completed: false },
- { number: 2, description: 'Step 2', completed: false },
- ],
- },
- };
- const output = renderTimelineCompact(plan);
- expect(output).toContain('0%');
- expect(output).not.toContain('NaN');
- });
-});
-
-describe('isPlanFile', () => {
- it('should match docs/plans/*.md', () => {
- expect(isPlanFile('docs/plans/2026-02-25-auth-design.md')).toBe(true);
- expect(isPlanFile('docs/plans/my-plan.md')).toBe(true);
- });
-
- it('should match *-design.md under docs/', () => {
- expect(isPlanFile('docs/auth-system-design.md')).toBe(true);
- expect(isPlanFile('/full/path/to/docs/feature-design.md')).toBe(true);
- });
-
- it('should match *-plan.md under docs/', () => {
- expect(isPlanFile('docs/my-feature-plan.md')).toBe(true);
- });
-
- it('should NOT match -design.md or -plan.md outside docs/', () => {
- expect(isPlanFile('src/css-design.md')).toBe(false);
- expect(isPlanFile('my-feature-plan.md')).toBe(false);
- expect(isPlanFile('/full/path/to/feature-design.md')).toBe(false);
- });
-
- it('should NOT match regular files', () => {
- expect(isPlanFile('src/index.ts')).toBe(false);
- expect(isPlanFile('README.md')).toBe(false);
- expect(isPlanFile('docs/ARCHITECTURE.md')).toBe(false);
- });
-
- it('should handle undefined/null gracefully', () => {
- expect(isPlanFile(undefined)).toBe(false);
- expect(isPlanFile(null)).toBe(false);
- expect(isPlanFile('')).toBe(false);
- });
-});
diff --git a/tests/unit/i18n.test.ts b/tests/unit/i18n.test.ts
deleted file mode 100644
index 0131fe3b..00000000
--- a/tests/unit/i18n.test.ts
+++ /dev/null
@@ -1,223 +0,0 @@
-import { describe, it, expect, beforeEach, afterEach } from 'vitest';
-import {
- detectLocale,
- t,
- setLocale,
- getLocale,
- type Locale,
-} from '../../src/i18n/index.js';
-
-describe('i18n Module', () => {
- // Save original env
- const originalEnv = { ...process.env };
-
- afterEach(() => {
- // Restore original env
- process.env = { ...originalEnv };
- // Reset locale to default
- setLocale('en');
- });
-
- describe('detectLocale', () => {
- it('should detect English locale from LANG environment variable', () => {
- process.env.LANG = 'en_US.UTF-8';
- expect(detectLocale()).toBe('en');
- });
-
- it('should detect Traditional Chinese locale from LANG', () => {
- process.env.LANG = 'zh_TW.UTF-8';
- expect(detectLocale()).toBe('zh-TW');
- });
-
- it('should detect Simplified Chinese locale from LANG', () => {
- process.env.LANG = 'zh_CN.UTF-8';
- expect(detectLocale()).toBe('zh-CN');
- });
-
- it('should detect Japanese locale from LANG', () => {
- process.env.LANG = 'ja_JP.UTF-8';
- expect(detectLocale()).toBe('ja');
- });
-
- it('should fallback to English for unsupported locales', () => {
- process.env.LANG = 'fr_FR.UTF-8';
- expect(detectLocale()).toBe('en');
- });
-
- it('should fallback to English when LANG is not set', () => {
- delete process.env.LANG;
- delete process.env.LC_ALL;
- delete process.env.LANGUAGE;
- expect(detectLocale()).toBe('en');
- });
-
- it('should check LC_ALL before LANG', () => {
- process.env.LC_ALL = 'ja_JP.UTF-8';
- process.env.LANG = 'en_US.UTF-8';
- expect(detectLocale()).toBe('ja');
- });
-
- it('should check LANGUAGE before LC_ALL and LANG', () => {
- process.env.LANGUAGE = 'zh_TW';
- process.env.LC_ALL = 'ja_JP.UTF-8';
- process.env.LANG = 'en_US.UTF-8';
- expect(detectLocale()).toBe('zh-TW');
- });
- });
-
- describe('setLocale and getLocale', () => {
- it('should set and get locale correctly', () => {
- setLocale('zh-TW');
- expect(getLocale()).toBe('zh-TW');
- });
-
- it('should default to English', () => {
- expect(getLocale()).toBe('en');
- });
-
- it('should accept all supported locales', () => {
- const locales: Locale[] = ['en', 'zh-TW', 'zh-CN', 'ja'];
- for (const locale of locales) {
- setLocale(locale);
- expect(getLocale()).toBe(locale);
- }
- });
- });
-
- describe('t (translate) function', () => {
- beforeEach(() => {
- setLocale('en');
- });
-
- describe('Basic key resolution', () => {
- it('should resolve a simple key', () => {
- const result = t('ccb.rule.readBeforeEdit');
- expect(result).toContain('MeMesh');
- expect(result.length).toBeGreaterThan(0);
- });
-
- it('should return key itself for missing keys', () => {
- const result = t('non.existent.key');
- expect(result).toBe('non.existent.key');
- });
- });
-
- describe('Parameter interpolation', () => {
- it('should interpolate single parameter', () => {
- const result = t('ccb.reminder.memories', { count: 5 });
- expect(result).toContain('5');
- expect(result).toContain('MeMesh');
- });
-
- it('should interpolate multiple parameters', () => {
- const result = t('ccb.reminder.mistakes', { count: 3, days: 7 });
- expect(result).toContain('3');
- expect(result).toContain('7');
- expect(result).toContain('MeMesh');
- });
-
- it('should leave unreplaced params as placeholders', () => {
- const result = t('ccb.reminder.mistakes', { count: 3 });
- // Should still contain ${days} since it wasn't provided
- expect(result).toContain('${days}');
- });
-
- it('should handle string parameters', () => {
- const result = t('ccb.reminder.mistakes', {
- count: '5',
- days: '7',
- });
- expect(result).toContain('5');
- });
- });
-
- describe('Multi-locale support', () => {
- it('should translate to Traditional Chinese', () => {
- setLocale('zh-TW');
- const result = t('ccb.rule.readBeforeEdit');
- expect(result).toContain('MeMesh');
- // Should not be the English version
- expect(result).not.toBe(
- (() => {
- setLocale('en');
- return t('ccb.rule.readBeforeEdit');
- })()
- );
- });
-
- it('should translate to Simplified Chinese', () => {
- setLocale('zh-CN');
- const result = t('ccb.rule.readBeforeEdit');
- expect(result).toContain('MeMesh');
- });
-
- it('should translate to Japanese', () => {
- setLocale('ja');
- const result = t('ccb.rule.readBeforeEdit');
- expect(result).toContain('MeMesh');
- });
-
- it('should fallback to English for missing keys in other locales', () => {
- setLocale('zh-TW');
- // Assuming this key only exists in English
- const result = t('ccb.fallback.test.key');
- // Should return the key itself since it doesn't exist anywhere
- expect(result).toBe('ccb.fallback.test.key');
- });
- });
-
- describe('MeMesh Branding requirement', () => {
- const ccbKeys = [
- 'ccb.reminder.mistakes',
- 'ccb.reminder.memories',
- 'ccb.reminder.preferences',
- 'ccb.rule.readBeforeEdit',
- 'ccb.rule.verifyBeforeClaim',
- 'ccb.preference.violation',
- ];
-
- for (const key of ccbKeys) {
- it(`should include MeMesh branding in ${key}`, () => {
- const result = t(key, { count: 1, days: 1, content: 'test' });
- expect(result).toContain('MeMesh');
- });
- }
-
- it('should include MeMesh branding in all locales', () => {
- const locales: Locale[] = ['en', 'zh-TW', 'zh-CN', 'ja'];
- for (const locale of locales) {
- setLocale(locale);
- const result = t('ccb.reminder.memories', { count: 5 });
- expect(result).toContain('MeMesh');
- }
- });
- });
- });
-
- describe('Edge cases', () => {
- it('should handle empty params object', () => {
- const result = t('ccb.rule.readBeforeEdit', {});
- expect(result).toContain('MeMesh');
- });
-
- it('should handle numeric zero as param', () => {
- const result = t('ccb.reminder.memories', { count: 0 });
- expect(result).toContain('0');
- });
-
- it('should handle special characters in params', () => {
- const result = t('ccb.reminder.operationWarning', {
- content: '',
- });
- expect(result).toContain('