Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .size-limit.js
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ module.exports = [
path: 'packages/browser/build/npm/esm/index.js',
import: createImport('init', 'feedbackAsyncIntegration'),
gzip: true,
limit: '34 KB',
limit: '35 KB',
},
// React SDK (ESM)
{
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import * as Sentry from '@sentry/browser';

// Expose the SDK on the global scope so the test harness can reach it.
window.Sentry = Sentry;

Sentry.init({
  debug: true,
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
// Mock Anthropic client for browser testing
export class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Fixed 10ms latency shared by the async mock endpoints.
    const simulateLatency = () => new Promise(resolve => setTimeout(resolve, 10));

    // Main focus: messages.create functionality
    this.messages = {
      create: async (...args) => {
        const [params] = args;
        await simulateLatency();

        // The sentinel 'error-model' id exercises the error path.
        if (params.model === 'error-model') {
          const notFound = new Error('Model not found');
          notFound.status = 404;
          notFound.headers = { 'x-request-id': 'mock-request-123' };
          throw notFound;
        }

        return {
          id: 'msg_mock123',
          type: 'message',
          role: 'assistant',
          model: params.model,
          content: [{ type: 'text', text: 'Hello from Anthropic mock!' }],
          stop_reason: 'end_turn',
          stop_sequence: null,
          usage: {
            input_tokens: 10,
            output_tokens: 15,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
          },
        };
      },
      countTokens: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock', input_tokens: 0 }),
    };

    // Minimal implementations for required interface compliance
    this.models = {
      list: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
      get: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
    };

    this.completions = {
      create: async (..._args) => ({ id: 'mock', type: 'completion', model: 'mock' }),
    };
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import { instrumentAnthropicAiClient } from '@sentry/browser';
import { MockAnthropic } from './mocks.js';

// Stand-in Anthropic client (see mocks.js); no real network calls are made.
const mockClient = new MockAnthropic({
  apiKey: 'mock-api-key',
});
Comment on lines +4 to +6
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

m: It isn't ideal for an integration test to mock the client we want to instrument. In our Node integration tests we usually test against the actual libraries (though not sure about AI packages to be fair).

I'm not sure if this works out of the box but we can try adding the packages as dependencies to the browser-integration-tests package.json and then just importing them in the subject files. We bundle each individual integration test suite via webpack so maybe webpack can actually resolve and bundle the dependencies out of the box. If this doesn't work, we should find a way to make this possible. I'm sure this is also a concern for logging libraries used in browser apps.

I'm fine though with merging the PR as-is and following up with replacing the mocks with the actual test apps. Whatever works for you better :)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We usually mock the server APIs for AI tests. Yes, this can be done in a follow-up; I'll draft a ticket for it.


// Wrap the mock client so the SDK creates gen_ai spans automatically;
// this verifies manual instrumentation doesn't crash the browser.
const instrumentedClient = instrumentAnthropicAiClient(mockClient);

const request = {
  model: 'claude-3-haiku-20240307',
  messages: [{ role: 'user', content: 'What is the capital of France?' }],
  temperature: 0.7,
  max_tokens: 100,
};
const response = await instrumentedClient.messages.create(request);

console.log('Received response', response);
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not crash in the browser
// and that gen_ai transactions are sent.

sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  // Wait for the transaction whose name carries the mocked model id.
  const transactionPromise = waitForTransactionRequest(page, event =>
    Boolean(event.transaction?.includes('claude-3-haiku-20240307')),
  );

  await page.goto(await getLocalTestUrl({ testDir: __dirname }));

  const eventData = envelopeRequestParser(await transactionPromise);

  // Verify it's a gen_ai transaction
  expect(eventData.transaction).toBe('messages claude-3-haiku-20240307');
  expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages');
  expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic');
  expect(eventData.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'messages',
    'gen_ai.system': 'anthropic',
    'gen_ai.request.model': 'claude-3-haiku-20240307',
    'gen_ai.request.temperature': 0.7,
    'gen_ai.response.model': 'claude-3-haiku-20240307',
    'gen_ai.response.id': 'msg_mock123',
    'gen_ai.usage.input_tokens': 10,
    'gen_ai.usage.output_tokens': 15,
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import * as Sentry from '@sentry/browser';

// Expose the SDK on the global scope so the test harness can reach it.
window.Sentry = Sentry;

Sentry.init({
  debug: true,
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
// Mock Google GenAI client for browser testing
export class MockGoogleGenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Fixed 10ms latency shared by the async mock endpoints.
    const simulateLatency = () => new Promise(resolve => setTimeout(resolve, 10));

    // Builds a fresh single-candidate payload so callers can't share state.
    const buildCandidates = text => [
      {
        content: {
          parts: [{ text }],
          role: 'model',
        },
        finishReason: 'stop',
        index: 0,
      },
    ];

    // Fresh usage metadata per response, matching the real SDK shape.
    const buildUsage = () => ({
      promptTokenCount: 8,
      candidatesTokenCount: 12,
      totalTokenCount: 20,
    });

    // models.generateContent functionality
    this.models = {
      generateContent: async (...args) => {
        const [params] = args;
        await simulateLatency();

        // The sentinel 'error-model' id exercises the error path.
        if (params.model === 'error-model') {
          const notFound = new Error('Model not found');
          notFound.status = 404;
          notFound.headers = { 'x-request-id': 'mock-request-123' };
          throw notFound;
        }

        return {
          candidates: buildCandidates('Hello from Google GenAI mock!'),
          usageMetadata: buildUsage(),
        };
      },
      generateContentStream: async () => {
        // Promise resolving to an async generator, mirroring the real SDK.
        return (async function* () {
          yield { candidates: buildCandidates('Streaming response') };
        })();
      },
    };

    // chats.create implementation
    this.chats = {
      create: (...args) => {
        const [params] = args;
        const { model } = params;

        return {
          modelVersion: model,
          sendMessage: async (..._messageArgs) => {
            await simulateLatency();
            return {
              candidates: buildCandidates('This is a joke from the chat!'),
              usageMetadata: buildUsage(),
              modelVersion: model, // Include model version in response
            };
          },
          sendMessageStream: async () => {
            // Promise resolving to an async generator, mirroring the real SDK.
            return (async function* () {
              yield { candidates: buildCandidates('Streaming chat response') };
            })();
          },
        };
      },
    };
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import { instrumentGoogleGenAIClient } from '@sentry/browser';
import { MockGoogleGenAI } from './mocks.js';

// Stand-in Google GenAI client (see mocks.js); no real network calls are made.
const mockClient = new MockGoogleGenAI({
  apiKey: 'mock-api-key',
});

// Wrap the mock client so the SDK creates gen_ai spans automatically;
// this verifies manual instrumentation doesn't crash the browser.
// Both the chats and models APIs are exercised.
const client = instrumentGoogleGenAIClient(mockClient);

const chat = client.chats.create({
  model: 'gemini-1.5-pro',
  config: { temperature: 0.8, topP: 0.9, maxOutputTokens: 150 },
  history: [{ role: 'user', parts: [{ text: 'Hello, how are you?' }] }],
});

const response = await chat.sendMessage({ message: 'Tell me a joke' });

console.log('Received response', response);
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not crash in the browser
// and that gen_ai transactions are sent.

sentryTest('manual Google GenAI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  // Wait for the transaction whose name carries the mocked model id.
  const transactionPromise = waitForTransactionRequest(page, event =>
    Boolean(event.transaction?.includes('gemini-1.5-pro')),
  );

  await page.goto(await getLocalTestUrl({ testDir: __dirname }));

  const eventData = envelopeRequestParser(await transactionPromise);

  // Verify it's a gen_ai transaction
  expect(eventData.transaction).toBe('chat gemini-1.5-pro create');
  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
  expect(eventData.contexts?.trace?.origin).toBe('auto.ai.google_genai');
  expect(eventData.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'chat',
    'gen_ai.system': 'google_genai',
    'gen_ai.request.model': 'gemini-1.5-pro',
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import * as Sentry from '@sentry/browser';

// Expose the SDK on the global scope so the test harness can reach it.
window.Sentry = Sentry;

Sentry.init({
  debug: true,
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
// Mock OpenAI client for browser testing
export class MockOpenAi {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async (...args) => {
          const [params] = args;
          // Fixed 10ms latency to simulate a network round trip.
          await new Promise(resolve => setTimeout(resolve, 10));

          // The sentinel 'error-model' id exercises the error path.
          if (params.model === 'error-model') {
            const notFound = new Error('Model not found');
            notFound.status = 404;
            notFound.headers = { 'x-request-id': 'mock-request-123' };
            throw notFound;
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: { role: 'assistant', content: 'Hello from OpenAI mock!' },
                finish_reason: 'stop',
              },
            ],
            usage: { prompt_tokens: 10, completion_tokens: 15, total_tokens: 25 },
          };
        },
      },
    };
  }
}
Loading
Loading