diff --git a/.cursor/rules/bun-test-mocks.mdc b/.cursor/rules/bun-test-mocks.mdc index 73537dbd..a5440bfa 100644 --- a/.cursor/rules/bun-test-mocks.mdc +++ b/.cursor/rules/bun-test-mocks.mdc @@ -7,11 +7,152 @@ alwaysApply: false This document outlines the standards for writing tests using Bun's testing framework in this repository. +## HTTP Request Mocking with MSW + +### Prefer MSW Over fetch Mocking + +**Always use MSW (Mock Service Worker) for mocking HTTP requests** instead of `spyOn(globalThis, 'fetch')`. + +MSW is already configured globally in `bun.test.setup.ts`, so you don't need to set up the server in individual test files. + +#### Adding Mock Handlers + +Add your mock endpoints to `mocks/handlers.ts`: + +```typescript +import { http, HttpResponse } from 'msw'; + +export const handlers = [ + http.get('https://api.example.com/endpoint', () => { + return HttpResponse.json({ data: 'mock response' }); + }), + // ... other handlers +]; +``` + +#### Overriding Handlers in Tests + +Use `server.use()` to override handlers for specific test cases: + +```typescript +import { http, HttpResponse } from 'msw'; +import { server } from '../../../mocks/node'; + +it('handles error responses', async () => { + server.use( + http.get('https://api.example.com/endpoint', () => { + return HttpResponse.json({ error: 'Not found' }, { status: 404 }); + }) + ); + + // Your test code here +}); +``` + +The global `afterEach` hook in `bun.test.setup.ts` automatically calls `server.resetHandlers()` to reset overrides. 
+ +#### Verifying Requests + +Use MSW's event listeners to verify that requests were made: + +```typescript +it('makes the expected request', async () => { + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); + }; + server.events.on('request:start', listener); + + // Make your request + await someFunction(); + + expect(recordedRequests).toHaveLength(1); + expect(recordedRequests[0]?.url).toBe('https://api.example.com/endpoint'); + + server.events.removeListener('request:start', listener); +}); +``` + +**Important:** +- Do NOT use `spyOn(globalThis, 'fetch')` for HTTP mocking - use MSW instead +- Do NOT add `beforeAll`/`afterAll`/`afterEach` for MSW setup in test files - it's already configured globally +- MSW handlers are reset after each test automatically +- For tests that need to run their own servers (e.g., MCP servers), you may need to temporarily close and restart MSW + +## File System Testing + +### Use fs-fixture for File System Operations + +For tests that need to work with the file system, use [fs-fixture](https://github.com/privatenumber/fs-fixture) to create temporary test directories with automatic cleanup. 
+ +**IMPORTANT: Always use `await using` syntax - do NOT manually manage fixture lifecycle.** + +**Basic Usage:** + +```typescript +import { createFixture } from 'fs-fixture'; + +it('should save file to disk', async () => { + // Using 'await using' ensures automatic cleanup + await using fixture = await createFixture(); + + // Write files using fixture methods + await fixture.writeFile('data.json', JSON.stringify({ test: 'data' })); + + // Verify and read + expect(await fixture.exists('data.json')).toBe(true); + const content = await fixture.readFile('data.json', 'utf8'); + expect(JSON.parse(content)).toEqual({ test: 'data' }); +}); +``` + +**Pre-populated Fixtures:** + +```typescript +it('should read existing files', async () => { + await using fixture = await createFixture({ + 'config.json': JSON.stringify({ setting: 'value' }), + 'data/file.txt': 'content', + }); + + // Files are already created at fixture.path + const result = await processConfig(fixture.path); + expect(result).toBeDefined(); +}); +``` + +**Type-Safe JSON:** + +```typescript +interface Config { name: string; version: string; } + +await using fixture = await createFixture(); +await fixture.writeJson('config.json', { name: 'test', version: '1.0.0' }, 2); +const config = await fixture.readJson('config.json'); +``` + +**Key Methods:** +- `fixture.path` - Absolute path to fixture directory +- `fixture.exists(path)` - Check if file/directory exists +- `fixture.readFile(path, encoding?)` - Read file +- `fixture.writeFile(path, content)` - Write file +- `fixture.readJson(path)` - Parse JSON with types +- `fixture.writeJson(path, data, space?)` - Write formatted JSON +- `fixture.mkdir(path)` - Create directories + +**Benefits:** +- Automatic cleanup with `await using` - no manual cleanup needed +- Zero dependencies, lightweight (1.1 kB) +- Type-safe JSON operations +- Isolated test environments + +**Reference:** See [fs-fixture documentation](https://github.com/privatenumber/fs-fixture) for 
advanced usage and API details + ## Mocking Best Practices ### Use Proper Mocking Functions -Always use the appropriate mocking functions from Bun's testing framework: +For non-HTTP mocking, use the appropriate mocking functions from Bun's testing framework: - Use `spyOn()` to track calls to existing functions without replacing their implementation - Use `mock()` to create standalone mock functions diff --git a/CLAUDE.md b/CLAUDE.md index 723d80f4..cfa6322b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -81,11 +81,166 @@ Use this approach to keep the union definition (`type HttpBodyType = 'json' | 'm ### Testing Strategy Tests use Bun's built-in test runner with a Jest-compatible API. Key patterns: -- Mock HTTP requests using `bun:test` mock functions +- **MSW (Mock Service Worker)** for HTTP request mocking - preferred over `spyOn(globalThis, 'fetch')` - Snapshot testing for generated outputs - Comprehensive unit tests for parsing logic - Integration tests with example usage +#### Testing with MSW + +The project uses [MSW](https://mswjs.io/) for mocking HTTP requests in tests. MSW is set up globally in `bun.test.setup.ts`, so you don't need to set up the server in individual test files. + +**Adding Mock Handlers:** + +Add your mock endpoints to `mocks/handlers.ts`: + +```typescript +import { http, HttpResponse } from 'msw'; + +export const handlers = [ + http.get('https://api.example.com/endpoint', () => { + return HttpResponse.json({ data: 'mock response' }); + }), + // ... 
other handlers +]; +``` + +**Overriding Handlers in Tests:** + +Use `server.use()` to override handlers for specific test cases: + +```typescript +import { http, HttpResponse } from 'msw'; +import { server } from '../../../mocks/node'; + +it('handles error responses', async () => { + server.use( + http.get('https://api.example.com/endpoint', () => { + return HttpResponse.json({ error: 'Not found' }, { status: 404 }); + }) + ); + + // Your test code here +}); +``` + +The global `afterEach` hook in `bun.test.setup.ts` automatically calls `server.resetHandlers()` to reset overrides. + +**Verifying Requests:** + +Use MSW's event listeners to verify that requests were made: + +```typescript +it('makes the expected request', async () => { + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); + }; + server.events.on('request:start', listener); + + // Make your request + await someFunction(); + + expect(recordedRequests).toHaveLength(1); + expect(recordedRequests[0]?.url).toBe('https://api.example.com/endpoint'); + + server.events.removeListener('request:start', listener); +}); +``` + +**Important Notes:** +- Do NOT use `spyOn(globalThis, 'fetch')` - use MSW instead for consistent mocking +- Do NOT add `beforeAll`/`afterAll`/`afterEach` for MSW setup in test files - it's already configured globally +- MSW handlers are reset after each test automatically +- For tests that need to run their own servers (e.g., MCP servers), you may need to temporarily close and restart MSW + +#### Testing File System Operations + +For tests that need to work with the file system, use [`fs-fixture`](https://github.com/privatenumber/fs-fixture) to create temporary test directories that are automatically cleaned up. + +**IMPORTANT: Always use `await using` syntax with fs-fixture to ensure automatic cleanup. 
Do NOT manually manage fixture lifecycle with `.rm()`.** + +**Basic Usage:** + +```typescript +import { createFixture } from 'fs-fixture'; + +it('should save file to disk', async () => { + // Using the 'await using' syntax ensures automatic cleanup + await using fixture = await createFixture(); + + // Write files using fixture methods + await fixture.writeFile('data.json', JSON.stringify({ test: 'data' })); + + // Verify the file exists + expect(await fixture.exists('data.json')).toBe(true); + + // Read the file back + const content = await fixture.readFile('data.json', 'utf8'); + expect(JSON.parse(content)).toEqual({ test: 'data' }); + + // fixture is automatically cleaned up when the test completes +}); +``` + +**Creating Pre-populated Fixtures:** + +```typescript +it('should read existing files', async () => { + await using fixture = await createFixture({ + 'config.json': JSON.stringify({ setting: 'value' }), + 'data/file.txt': 'content', + 'src/index.ts': 'export const test = "hello";', + }); + + // Files are already created at fixture.path + const result = await processConfig(fixture.path); + expect(result).toBeDefined(); +}); +``` + +**Type-Safe JSON Operations:** + +```typescript +it('should handle JSON with type safety', async () => { + await using fixture = await createFixture(); + + interface Config { + name: string; + version: string; + } + + // Write JSON with formatting + await fixture.writeJson('config.json', { name: 'test', version: '1.0.0' }, 2); + + // Read JSON with type inference + const config = await fixture.readJson('config.json'); + expect(config.name).toBe('test'); +}); +``` + +**Available Methods:** +- `fixture.path` - Absolute path to fixture directory +- `fixture.getPath(...paths)` - Build paths relative to fixture root +- `fixture.exists(path)` - Check if file/directory exists +- `fixture.readFile(path, encoding?)` - Read file as string or Buffer +- `fixture.writeFile(path, content)` - Write file content +- `fixture.readJson(path)` - 
Parse JSON with type safety +- `fixture.writeJson(path, data, space?)` - Write formatted JSON +- `fixture.mkdir(path)` - Create nested directories +- `fixture.readdir(path, options?)` - List directory contents +- `fixture.cp(source, dest?)` - Copy files into fixture + +**Benefits:** +- Automatic cleanup with TypeScript 5.2+ `await using` declarations - no manual cleanup needed +- Isolated test environment - each test gets its own temporary directory +- Zero dependencies, lightweight (1.1 kB gzipped) +- Type-safe JSON operations with generics +- Works seamlessly with Bun's test runner + +**Learn More:** +- [fs-fixture Documentation](https://github.com/privatenumber/fs-fixture) - Official documentation with advanced usage patterns and API reference + ### Development Workflow 1. OpenAPI specs are fetched from remote sources and stored in `specs/` diff --git a/mocks/handlers.ts b/mocks/handlers.ts index ea5541c9..16ad2fbe 100644 --- a/mocks/handlers.ts +++ b/mocks/handlers.ts @@ -96,6 +96,17 @@ export const handlers = [ return HttpResponse.json(body); }), + // StackOne AI tool feedback endpoint + http.post('https://api.stackone.com/ai/tool-feedback', async ({ request }) => { + await request.json(); + return HttpResponse.json({ + message: 'Feedback successfully stored', + key: 'test-key.json', + submitted_at: new Date().toISOString(), + trace_id: 'test-trace-id', + }); + }), + + // Default handler for unmatched requests http.get('*', () => { return HttpResponse.json({ message: 'Mock endpoint' }); diff --git a/src/tools/feedback.ts b/src/tools/feedback.ts index 13a7239f..e95e4f4e 100644 --- a/src/tools/feedback.ts +++ b/src/tools/feedback.ts @@ -30,7 +30,10 @@ const feedbackInputSchema = z.object({ tool_names: z .array(z.string()) .min(1, 'At least one tool name is required') - .transform((value) => value.map((item) => item.trim()).filter((item) => item.length > 0)), + .transform((value) => value.map((item) => item.trim()).filter((item) => item.length > 
0)) + .refine((value) => value.length > 0, { + message: 'Tool names must contain at least one non-empty string', + }), }); export function createFeedbackTool( diff --git a/src/tools/tests/feedback.spec.ts b/src/tools/tests/feedback.spec.ts index ec4544b4..7419b153 100644 --- a/src/tools/tests/feedback.spec.ts +++ b/src/tools/tests/feedback.spec.ts @@ -1,11 +1,9 @@ -import { afterAll, beforeEach, describe, expect, it, spyOn } from 'bun:test'; +import { describe, expect, it } from 'bun:test'; +import { http, HttpResponse } from 'msw'; +import { server } from '../../../mocks/node'; import { StackOneError } from '../../utils/errors'; import { createFeedbackTool } from '../feedback'; -beforeEach(() => { - // Clear any mocks before each test -}); - describe('meta_collect_tool_feedback', () => { describe('validation tests', () => { it('test_missing_required_fields', async () => { @@ -84,9 +82,11 @@ describe('meta_collect_tool_feedback', () => { it('test_json_string_input', async () => { const tool = createFeedbackTool(); - const apiResponse = { message: 'Success' }; - const response = new Response(JSON.stringify(apiResponse), { status: 200 }); - const fetchSpy = spyOn(globalThis, 'fetch').mockResolvedValue(response); + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); + }; + server.events.on('request:start', listener); // Test JSON string input const jsonInput = JSON.stringify({ @@ -97,28 +97,25 @@ describe('meta_collect_tool_feedback', () => { const result = await tool.execute(jsonInput); - expect(fetchSpy).toHaveBeenCalledTimes(1); + expect(recordedRequests).toHaveLength(1); expect(result).toMatchObject({ message: 'Feedback sent to 1 account(s)', total_accounts: 1, successful: 1, failed: 0, }); - fetchSpy.mockRestore(); + server.events.removeListener('request:start', listener); }); }); describe('execution tests', () => { it('test_single_account_execution', async () => { const tool = 
createFeedbackTool(); - const apiResponse = { - message: 'Feedback successfully stored', - key: 'test-key.json', - submitted_at: '2025-10-08T11:44:16.123Z', - trace_id: 'test-trace-id', + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); }; - const response = new Response(JSON.stringify(apiResponse), { status: 200 }); - const fetchSpy = spyOn(globalThis, 'fetch').mockResolvedValue(response); + server.events.on('request:start', listener); const result = await tool.execute({ feedback: 'Great tools!', @@ -126,10 +123,9 @@ describe('meta_collect_tool_feedback', () => { tool_names: ['data_export', 'analytics'], }); - expect(fetchSpy).toHaveBeenCalledTimes(1); - const [calledUrl, options] = fetchSpy.mock.calls[0]; - expect(calledUrl).toBe('https://api.stackone.com/ai/tool-feedback'); - expect(options).toMatchObject({ method: 'POST' }); + expect(recordedRequests).toHaveLength(1); + expect(recordedRequests[0]?.url).toBe('https://api.stackone.com/ai/tool-feedback'); + expect(recordedRequests[0]?.method).toBe('POST'); expect(result).toMatchObject({ message: 'Feedback sent to 1 account(s)', total_accounts: 1, @@ -139,16 +135,18 @@ describe('meta_collect_tool_feedback', () => { expect(result.results[0]).toMatchObject({ account_id: 'acc_123456', status: 'success', - result: apiResponse, }); - fetchSpy.mockRestore(); + expect(result.results[0].result).toHaveProperty('message', 'Feedback successfully stored'); + server.events.removeListener('request:start', listener); }); it('test_call_method_interface', async () => { const tool = createFeedbackTool(); - const apiResponse = { message: 'Success' }; - const response = new Response(JSON.stringify(apiResponse), { status: 200 }); - const fetchSpy = spyOn(globalThis, 'fetch').mockResolvedValue(response); + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); + }; + 
server.events.on('request:start', listener); // Test using the tool directly (equivalent to .call() in Python) const result = await tool.execute({ @@ -157,22 +155,25 @@ describe('meta_collect_tool_feedback', () => { tool_names: ['test_tool'], }); - expect(fetchSpy).toHaveBeenCalledTimes(1); + expect(recordedRequests).toHaveLength(1); expect(result).toMatchObject({ message: 'Feedback sent to 1 account(s)', total_accounts: 1, successful: 1, failed: 0, }); - fetchSpy.mockRestore(); + server.events.removeListener('request:start', listener); }); it('test_api_error_handling', async () => { const tool = createFeedbackTool(); - const errorResponse = new Response(JSON.stringify({ error: 'Unauthorized' }), { - status: 401, - }); - const fetchSpy = spyOn(globalThis, 'fetch').mockResolvedValue(errorResponse); + + // Override the default handler to return an error + server.use( + http.post('https://api.stackone.com/ai/tool-feedback', () => { + return HttpResponse.json({ error: 'Unauthorized' }, { status: 401 }); + }) + ); await expect( tool.execute({ @@ -181,18 +182,17 @@ describe('meta_collect_tool_feedback', () => { tool_names: ['test_tool'], }) ).rejects.toBeInstanceOf(StackOneError); - - fetchSpy.mockRestore(); }); it('test_multiple_account_ids_execution', async () => { const tool = createFeedbackTool(); // Test all accounts succeed - const successResponse = { message: 'Success' }; - const fetchSpy = spyOn(globalThis, 'fetch').mockImplementation(() => - Promise.resolve(new Response(JSON.stringify(successResponse), { status: 200 })) - ); + const recordedRequests: Request[] = []; + const listener = ({ request }: { request: Request }) => { + recordedRequests.push(request); + }; + server.events.on('request:start', listener); const result = await tool.execute({ feedback: 'Great tools!', @@ -200,23 +200,26 @@ describe('meta_collect_tool_feedback', () => { tool_names: ['test_tool'], }); - expect(fetchSpy).toHaveBeenCalledTimes(3); + expect(recordedRequests).toHaveLength(3); 
expect(result).toMatchObject({ message: 'Feedback sent to 3 account(s)', total_accounts: 3, successful: 3, failed: 0, }); - fetchSpy.mockRestore(); + server.events.removeListener('request:start', listener); // Test mixed success/error scenario - const mixedFetchSpy = spyOn(globalThis, 'fetch') - .mockImplementationOnce(() => - Promise.resolve(new Response(JSON.stringify({ message: 'Success' }), { status: 200 })) - ) - .mockImplementationOnce(() => - Promise.resolve(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })) - ); + let callCount = 0; + server.use( + http.post('https://api.stackone.com/ai/tool-feedback', () => { + callCount++; + if (callCount === 1) { + return HttpResponse.json({ message: 'Success' }); + } + return HttpResponse.json({ error: 'Unauthorized' }, { status: 401 }); + }) + ); const mixedResult = await tool.execute({ feedback: 'Great tools!', @@ -224,7 +227,7 @@ describe('meta_collect_tool_feedback', () => { tool_names: ['test_tool'], }); - expect(mixedFetchSpy).toHaveBeenCalledTimes(2); + expect(callCount).toBe(2); expect(mixedResult).toMatchObject({ message: 'Feedback sent to 2 account(s)', total_accounts: 2, @@ -249,7 +252,6 @@ describe('meta_collect_tool_feedback', () => { status: 'error', error: '{"error":"Unauthorized"}', }); - mixedFetchSpy.mockRestore(); }); it('test_tool_integration', async () => { @@ -313,7 +315,3 @@ describe('meta_collect_tool_feedback', () => { }); }); }); - -afterAll(() => { - // Cleanup -});