f38b5b85bc
* docs: add investigation reports for 5 open GitHub issues Comprehensive analysis of issues #543, #544, #545, #555, and #557: - #557: settings.json not generated, module loader error (node/bun mismatch) - #555: Windows hooks not executing, hasIpc always false - #545: formatTool crashes on non-JSON tool_input strings - #544: mem-search skill hint shown incorrectly to Claude Code users - #543: /claude-mem slash command unavailable despite installation Each report includes root cause analysis, affected files, and proposed fixes. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * fix(logger): handle non-JSON tool_input in formatTool (#545) Wrap JSON.parse in try-catch to handle raw string inputs (e.g., Bash commands) that aren't valid JSON. Falls back to using the string as-is. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * fix(context): update mem-search hint to reference MCP tools (#544) Update hint messages to reference MCP tools (search, get_observations) instead of the deprecated "mem-search skill" terminology. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * fix(settings): auto-create settings.json on first load (#557, #543) When settings.json doesn't exist, create it with defaults instead of returning in-memory defaults. Creates parent directory if needed. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * fix(hooks): use bun runtime for hooks except smart-install (#557) Change hook commands from node to bun since hooks use bun:sqlite. Keep smart-install.js on node since it bootstraps bun installation. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * chore: rebuild plugin scripts * docs: clarify that build artifacts must be committed * fix(docs): update build artifacts directory reference in CLAUDE.md * test: add test coverage for PR #558 fixes - Fix 2 failing tests: update "mem-search skill" → "MCP tools" expectations - Add 56 tests for formatTool() JSON.parse crash fix (Issue #545) - Add 27 tests for settings.json auto-creation (Issue #543) Test coverage includes: - formatTool: JSON parsing, raw strings, objects, null/undefined, all tool types - Settings: file creation, directory creation, schema migration, edge cases 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * fix(tests): clean up flaky tests and fix circular dependency Phase 1 of test quality improvements: - Delete 6 harmful/worthless test files that used problematic mock.module() patterns or tested implementation details rather than behavior: - context-builder.test.ts (tested internal implementation) - export-types.test.ts (fragile mock patterns) - smart-install.test.ts (shell script testing antipattern) - session_id_refactor.test.ts (outdated, tested refactoring itself) - validate_sql_update.test.ts (one-time migration validation) - observation-broadcaster.test.ts (excessive mocking) - Fix circular dependency between logger.ts and SettingsDefaultsManager.ts by using late binding pattern - logger now lazily loads settings - Refactor mock.module() to spyOn() in several test files for more maintainable and less brittle tests: - observation-compiler.test.ts - gemini_agent.test.ts - error-handler.test.ts - server.test.ts - response-processor.test.ts All 649 tests pass. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * refactor(tests): phase 2 - reduce mock-heavy tests and improve focus - Remove mock-heavy query tests from observation-compiler.test.ts, keep real buildTimeline tests - Convert session_id_usage_validation.test.ts from 477 to 178 lines of focused smoke tests - Remove tests for language built-ins from worker-spawn.test.ts (JSON.parse, array indexing) - Rename logger-coverage.test.ts to logger-usage-standards.test.ts for clarity 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * docs(tests): phase 3 - add JSDoc mock justification to test files Document mock usage rationale in 5 test files to improve maintainability: - error-handler.test.ts: Express req/res mocks, logger spies (~11%) - fallback-error-handler.test.ts: Zero mocks, pure function tests - session-cleanup-helper.test.ts: Session fixtures, worker mocks (~19%) - hook-constants.test.ts: process.platform mock for Windows tests (~12%) - session_store.test.ts: Zero mocks, real SQLite :memory: database Part of ongoing effort to document mock justifications per TESTING.md guidelines. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * test(integration): phase 5 - add 72 tests for critical coverage gaps Add comprehensive test coverage for previously untested areas: - tests/integration/hook-execution-e2e.test.ts (10 tests) Tests lifecycle hooks execution flow and context propagation - tests/integration/worker-api-endpoints.test.ts (19 tests) Tests all worker service HTTP endpoints without heavy mocking - tests/integration/chroma-vector-sync.test.ts (16 tests) Tests vector embedding synchronization with ChromaDB - tests/utils/tag-stripping.test.ts (27 tests) Tests privacy tag stripping utilities for both <private> and <meta-observation> tags All tests use real implementations where feasible, following the project's testing philosophy of preferring integration-style tests over unit tests with extensive mocking. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * context update * docs: add comment linking DEFAULT_DATA_DIR locations Added NOTE comment in logger.ts pointing to the canonical DEFAULT_DATA_DIR in SettingsDefaultsManager.ts. This addresses PR reviewer feedback about the fragility of having the default defined in two places to avoid circular dependencies. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
389 lines
12 KiB
TypeScript
389 lines
12 KiB
TypeScript
/**
|
|
* Worker API Endpoints Integration Tests
|
|
*
|
|
* Tests all REST API endpoints with real HTTP and database.
|
|
* Uses real Server instance with in-memory database.
|
|
*
|
|
* Sources:
|
|
* - Server patterns from tests/server/server.test.ts
|
|
* - Session routes from src/services/worker/http/routes/SessionRoutes.ts
|
|
* - Search routes from src/services/worker/http/routes/SearchRoutes.ts
|
|
*/
|
|
|
|
import { describe, it, expect, beforeEach, afterEach, spyOn, mock } from 'bun:test';
|
|
import { logger } from '../../src/utils/logger.js';
|
|
|
|
// Mock middleware to avoid complex dependencies
|
|
mock.module('../../src/services/worker/http/middleware.js', () => ({
|
|
createMiddleware: () => [],
|
|
requireLocalhost: (_req: any, _res: any, next: any) => next(),
|
|
summarizeRequestBody: () => 'test body',
|
|
}));
|
|
|
|
// Import after mocks
|
|
import { Server } from '../../src/services/server/Server.js';
|
|
import type { ServerOptions } from '../../src/services/server/Server.js';
|
|
|
|
// Suppress logger output during tests
|
|
let loggerSpies: ReturnType<typeof spyOn>[] = [];
|
|
|
|
describe('Worker API Endpoints Integration', () => {
|
|
let server: Server;
|
|
let testPort: number;
|
|
let mockOptions: ServerOptions;
|
|
|
|
beforeEach(() => {
|
|
loggerSpies = [
|
|
spyOn(logger, 'info').mockImplementation(() => {}),
|
|
spyOn(logger, 'debug').mockImplementation(() => {}),
|
|
spyOn(logger, 'warn').mockImplementation(() => {}),
|
|
spyOn(logger, 'error').mockImplementation(() => {}),
|
|
];
|
|
|
|
mockOptions = {
|
|
getInitializationComplete: () => true,
|
|
getMcpReady: () => true,
|
|
onShutdown: mock(() => Promise.resolve()),
|
|
onRestart: mock(() => Promise.resolve()),
|
|
};
|
|
|
|
testPort = 40000 + Math.floor(Math.random() * 10000);
|
|
});
|
|
|
|
afterEach(async () => {
|
|
loggerSpies.forEach(spy => spy.mockRestore());
|
|
|
|
if (server && server.getHttpServer()) {
|
|
try {
|
|
await server.close();
|
|
} catch {
|
|
// Ignore cleanup errors
|
|
}
|
|
}
|
|
mock.restore();
|
|
});
|
|
|
|
describe('Health/Readiness/Version Endpoints', () => {
|
|
describe('GET /api/health', () => {
|
|
it('should return status, initialized, mcpReady, platform, pid', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
expect(response.status).toBe(200);
|
|
|
|
const body = await response.json();
|
|
expect(body).toHaveProperty('status', 'ok');
|
|
expect(body).toHaveProperty('initialized', true);
|
|
expect(body).toHaveProperty('mcpReady', true);
|
|
expect(body).toHaveProperty('platform');
|
|
expect(body).toHaveProperty('pid');
|
|
expect(typeof body.platform).toBe('string');
|
|
expect(typeof body.pid).toBe('number');
|
|
});
|
|
|
|
it('should reflect uninitialized state', async () => {
|
|
const uninitOptions: ServerOptions = {
|
|
getInitializationComplete: () => false,
|
|
getMcpReady: () => false,
|
|
onShutdown: mock(() => Promise.resolve()),
|
|
onRestart: mock(() => Promise.resolve()),
|
|
};
|
|
|
|
server = new Server(uninitOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
const body = await response.json();
|
|
|
|
expect(body.status).toBe('ok'); // Health always returns ok
|
|
expect(body.initialized).toBe(false);
|
|
expect(body.mcpReady).toBe(false);
|
|
});
|
|
});
|
|
|
|
describe('GET /api/readiness', () => {
|
|
it('should return 200 with status ready when initialized', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/readiness`);
|
|
expect(response.status).toBe(200);
|
|
|
|
const body = await response.json();
|
|
expect(body.status).toBe('ready');
|
|
expect(body.mcpReady).toBe(true);
|
|
});
|
|
|
|
it('should return 503 with status initializing when not ready', async () => {
|
|
const uninitOptions: ServerOptions = {
|
|
getInitializationComplete: () => false,
|
|
getMcpReady: () => false,
|
|
onShutdown: mock(() => Promise.resolve()),
|
|
onRestart: mock(() => Promise.resolve()),
|
|
};
|
|
|
|
server = new Server(uninitOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/readiness`);
|
|
expect(response.status).toBe(503);
|
|
|
|
const body = await response.json();
|
|
expect(body.status).toBe('initializing');
|
|
expect(body.message).toContain('initializing');
|
|
});
|
|
});
|
|
|
|
describe('GET /api/version', () => {
|
|
it('should return version string', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/version`);
|
|
expect(response.status).toBe(200);
|
|
|
|
const body = await response.json();
|
|
expect(body).toHaveProperty('version');
|
|
expect(typeof body.version).toBe('string');
|
|
});
|
|
});
|
|
});
|
|
|
|
describe('Error Handling', () => {
|
|
describe('404 Not Found', () => {
|
|
it('should return 404 for unknown GET routes', async () => {
|
|
server = new Server(mockOptions);
|
|
server.finalizeRoutes();
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/unknown-endpoint`);
|
|
expect(response.status).toBe(404);
|
|
|
|
const body = await response.json();
|
|
expect(body.error).toBe('NotFound');
|
|
});
|
|
|
|
it('should return 404 for unknown POST routes', async () => {
|
|
server = new Server(mockOptions);
|
|
server.finalizeRoutes();
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/unknown-endpoint`, {
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ test: 'data' })
|
|
});
|
|
expect(response.status).toBe(404);
|
|
});
|
|
|
|
it('should return 404 for nested unknown routes', async () => {
|
|
server = new Server(mockOptions);
|
|
server.finalizeRoutes();
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/search/nonexistent/nested`);
|
|
expect(response.status).toBe(404);
|
|
});
|
|
});
|
|
|
|
describe('Method handling', () => {
|
|
it('should handle OPTIONS requests', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/health`, {
|
|
method: 'OPTIONS'
|
|
});
|
|
// OPTIONS should either return 200 or 204 (CORS preflight)
|
|
expect([200, 204]).toContain(response.status);
|
|
});
|
|
});
|
|
});
|
|
|
|
describe('Content-Type Handling', () => {
|
|
it('should accept application/json content type', async () => {
|
|
server = new Server(mockOptions);
|
|
server.finalizeRoutes();
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/nonexistent`, {
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ key: 'value' })
|
|
});
|
|
|
|
// Should get 404 (route not found), not a content-type error
|
|
expect(response.status).toBe(404);
|
|
});
|
|
|
|
it('should return JSON responses with correct content type', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
const contentType = response.headers.get('content-type');
|
|
|
|
expect(contentType).toContain('application/json');
|
|
});
|
|
});
|
|
|
|
describe('Server State Management', () => {
|
|
it('should track initialization state dynamically', async () => {
|
|
let initialized = false;
|
|
const dynamicOptions: ServerOptions = {
|
|
getInitializationComplete: () => initialized,
|
|
getMcpReady: () => true,
|
|
onShutdown: mock(() => Promise.resolve()),
|
|
onRestart: mock(() => Promise.resolve()),
|
|
};
|
|
|
|
server = new Server(dynamicOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
// Check uninitialized
|
|
let response = await fetch(`http://127.0.0.1:${testPort}/api/readiness`);
|
|
expect(response.status).toBe(503);
|
|
|
|
// Initialize
|
|
initialized = true;
|
|
|
|
// Check initialized
|
|
response = await fetch(`http://127.0.0.1:${testPort}/api/readiness`);
|
|
expect(response.status).toBe(200);
|
|
});
|
|
|
|
it('should track MCP ready state dynamically', async () => {
|
|
let mcpReady = false;
|
|
const dynamicOptions: ServerOptions = {
|
|
getInitializationComplete: () => true,
|
|
getMcpReady: () => mcpReady,
|
|
onShutdown: mock(() => Promise.resolve()),
|
|
onRestart: mock(() => Promise.resolve()),
|
|
};
|
|
|
|
server = new Server(dynamicOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
// Check MCP not ready
|
|
let response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
let body = await response.json();
|
|
expect(body.mcpReady).toBe(false);
|
|
|
|
// Set MCP ready
|
|
mcpReady = true;
|
|
|
|
// Check MCP ready
|
|
response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
body = await response.json();
|
|
expect(body.mcpReady).toBe(true);
|
|
});
|
|
});
|
|
|
|
describe('Server Lifecycle', () => {
|
|
it('should start listening on specified port', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
const httpServer = server.getHttpServer();
|
|
expect(httpServer).not.toBeNull();
|
|
expect(httpServer!.listening).toBe(true);
|
|
});
|
|
|
|
it('should close gracefully', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
// Verify it's running
|
|
const response = await fetch(`http://127.0.0.1:${testPort}/api/health`);
|
|
expect(response.status).toBe(200);
|
|
|
|
// Close
|
|
try {
|
|
await server.close();
|
|
} catch (e: any) {
|
|
if (e.code !== 'ERR_SERVER_NOT_RUNNING') throw e;
|
|
}
|
|
|
|
// Verify closed
|
|
const httpServer = server.getHttpServer();
|
|
if (httpServer) {
|
|
expect(httpServer.listening).toBe(false);
|
|
}
|
|
});
|
|
|
|
it('should handle port conflicts', async () => {
|
|
server = new Server(mockOptions);
|
|
const server2 = new Server(mockOptions);
|
|
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
// Second server should fail on same port
|
|
await expect(server2.listen(testPort, '127.0.0.1')).rejects.toThrow();
|
|
|
|
// Clean up second server if it has a reference
|
|
const httpServer2 = server2.getHttpServer();
|
|
if (httpServer2) {
|
|
expect(httpServer2.listening).toBe(false);
|
|
}
|
|
});
|
|
|
|
it('should allow restart on same port after close', async () => {
|
|
server = new Server(mockOptions);
|
|
await server.listen(testPort, '127.0.0.1');
|
|
|
|
// Close first server
|
|
try {
|
|
await server.close();
|
|
} catch (e: any) {
|
|
if (e.code !== 'ERR_SERVER_NOT_RUNNING') throw e;
|
|
}
|
|
|
|
// Wait for port to be released
|
|
await new Promise(resolve => setTimeout(resolve, 100));
|
|
|
|
// Start second server on same port
|
|
const server2 = new Server(mockOptions);
|
|
await server2.listen(testPort, '127.0.0.1');
|
|
|
|
expect(server2.getHttpServer()!.listening).toBe(true);
|
|
|
|
// Clean up
|
|
try {
|
|
await server2.close();
|
|
} catch {
|
|
// Ignore cleanup errors
|
|
}
|
|
});
|
|
});
|
|
|
|
describe('Route Registration', () => {
|
|
it('should register route handlers', () => {
|
|
server = new Server(mockOptions);
|
|
|
|
const setupRoutesMock = mock(() => {});
|
|
const mockRouteHandler = {
|
|
setupRoutes: setupRoutesMock,
|
|
};
|
|
|
|
server.registerRoutes(mockRouteHandler);
|
|
|
|
expect(setupRoutesMock).toHaveBeenCalledTimes(1);
|
|
expect(setupRoutesMock).toHaveBeenCalledWith(server.app);
|
|
});
|
|
|
|
it('should register multiple route handlers', () => {
|
|
server = new Server(mockOptions);
|
|
|
|
const handler1Mock = mock(() => {});
|
|
const handler2Mock = mock(() => {});
|
|
|
|
server.registerRoutes({ setupRoutes: handler1Mock });
|
|
server.registerRoutes({ setupRoutes: handler2Mock });
|
|
|
|
expect(handler1Mock).toHaveBeenCalledTimes(1);
|
|
expect(handler2Mock).toHaveBeenCalledTimes(1);
|
|
});
|
|
});
|
|
});
|