@@ -0,0 +1,61 @@
import * as Sentry from '@sentry/cloudflare';
import type { GoogleGenAIClient } from '@sentry/core';
import { MockGoogleGenAI } from './mocks';

interface Env {
  SENTRY_DSN: string;
}

const mockClient = new MockGoogleGenAI({
  apiKey: 'mock-api-key',
});

const client: GoogleGenAIClient = Sentry.instrumentGoogleGenAIClient(mockClient);

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    tracesSampleRate: 1.0,
  }),
  {
    async fetch(_request, _env, _ctx) {
      // Test 1: chats.create and sendMessage flow
      const chat = client.chats.create({
        model: 'gemini-1.5-pro',
        config: {
          temperature: 0.8,
          topP: 0.9,
          maxOutputTokens: 150,
        },
        history: [
          {
            role: 'user',
            parts: [{ text: 'Hello, how are you?' }],
          },
        ],
      });

      const chatResponse = await chat.sendMessage({
        message: 'Tell me a joke',
      });

      // Test 2: models.generateContent
      const modelResponse = await client.models.generateContent({
        model: 'gemini-1.5-flash',
        config: {
          temperature: 0.7,
          topP: 0.9,
          maxOutputTokens: 100,
        },
        contents: [
          {
            role: 'user',
            parts: [{ text: 'What is the capital of France?' }],
          },
        ],
      });

      return new Response(JSON.stringify({ chatResponse, modelResponse }));
    },
  },
);
@@ -0,0 +1,128 @@
import type { GoogleGenAIChat, GoogleGenAIClient, GoogleGenAIResponse } from '@sentry/core';

export class MockGoogleGenAI implements GoogleGenAIClient {
  public models: {
    generateContent: (...args: unknown[]) => Promise<GoogleGenAIResponse>;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    generateContentStream: (...args: unknown[]) => Promise<AsyncGenerator<GoogleGenAIResponse, any, unknown>>;
  };
  public chats: {
    create: (...args: unknown[]) => GoogleGenAIChat;
  };
  public apiKey: string;

  public constructor(config: { apiKey: string }) {
    this.apiKey = config.apiKey;

    // models.generateContent functionality
    this.models = {
      generateContent: async (...args: unknown[]) => {
        const params = args[0] as { model: string; contents?: unknown };
        // Simulate processing time
        await new Promise(resolve => setTimeout(resolve, 10));

        if (params.model === 'error-model') {
          const error = new Error('Model not found');
          (error as unknown as { status: number }).status = 404;
          (error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
          throw error;
        }

        return {
          candidates: [
            {
              content: {
                parts: [
                  {
                    text: 'Hello from Google GenAI mock!',
                  },
                ],
                role: 'model',
              },
              finishReason: 'stop',
              index: 0,
            },
          ],
          usageMetadata: {
            promptTokenCount: 8,
            candidatesTokenCount: 12,
            totalTokenCount: 20,
          },
        };
      },
      generateContentStream: async () => {
        // Return a promise that resolves to an async generator
        return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
          yield {
            candidates: [
              {
                content: {
                  parts: [{ text: 'Streaming response' }],
                  role: 'model',
                },
                finishReason: 'stop',
                index: 0,
              },
            ],
          };
        })();
      },
    };

    // chats.create implementation
    this.chats = {
      create: (...args: unknown[]) => {
        const params = args[0] as { model: string; config?: Record<string, unknown> };
        const model = params.model;

        return {
          modelVersion: model,
          sendMessage: async (..._messageArgs: unknown[]) => {
            // Simulate processing time
            await new Promise(resolve => setTimeout(resolve, 10));

            return {
              candidates: [
                {
                  content: {
                    parts: [
                      {
                        text: 'This is a joke from the chat!',
                      },
                    ],
                    role: 'model',
                  },
                  finishReason: 'stop',
                  index: 0,
                },
              ],
              usageMetadata: {
                promptTokenCount: 8,
                candidatesTokenCount: 12,
                totalTokenCount: 20,
              },
              modelVersion: model, // Include model version in response
            };
          },
          sendMessageStream: async () => {
            // Return a promise that resolves to an async generator
            return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
              yield {
                candidates: [
                  {
                    content: {
                      parts: [{ text: 'Streaming chat response' }],
                      role: 'model',
                    },
                    finishReason: 'stop',
                    index: 0,
                  },
                ],
              };
            })();
          },
        };
      },
    };
  }
}
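Note that the fixture above never exercises the mock's `error-model` branch. A minimal sketch of what that branch does when called directly (vitest-style; not part of this PR, and the test name is illustrative):

```typescript
import { expect, it } from 'vitest';
import { MockGoogleGenAI } from './mocks';

it('rejects with a 404-style error for error-model', async () => {
  const client = new MockGoogleGenAI({ apiKey: 'mock-api-key' });

  // The mock attaches `status` and `headers` to the thrown Error,
  // mimicking the shape of a real Google GenAI API failure.
  await expect(client.models.generateContent({ model: 'error-model' })).rejects.toMatchObject({
    message: 'Model not found',
    status: 404,
    headers: { 'x-request-id': 'mock-request-123' },
  });
});
```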
@@ -0,0 +1,75 @@
import { expect, it } from 'vitest';
import { createRunner } from '../../../runner';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not break in our
// cloudflare SDK.

it('traces Google GenAI chat creation and message sending', async () => {
  const runner = createRunner(__dirname)
    .ignore('event')
    .expect(envelope => {
      const transactionEvent = envelope[1]?.[0]?.[1] as any;

      expect(transactionEvent.transaction).toBe('GET /');
      expect(transactionEvent.spans).toEqual(
        expect.arrayContaining([
          // First span - chats.create
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-pro',
              'gen_ai.request.temperature': 0.8,
              'gen_ai.request.top_p': 0.9,
              'gen_ai.request.max_tokens': 150,
            }),
            description: 'chat gemini-1.5-pro create',
            op: 'gen_ai.chat',
            origin: 'auto.ai.google_genai',
          }),
          // Second span - chat.sendMessage
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-pro',
              'gen_ai.usage.input_tokens': 8,
              'gen_ai.usage.output_tokens': 12,
              'gen_ai.usage.total_tokens': 20,
            }),
            description: 'chat gemini-1.5-pro',
            op: 'gen_ai.chat',
            origin: 'auto.ai.google_genai',
          }),
          // Third span - models.generateContent
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'models',
              'sentry.op': 'gen_ai.models',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-flash',
              'gen_ai.request.temperature': 0.7,
              'gen_ai.request.top_p': 0.9,
              'gen_ai.request.max_tokens': 100,
              'gen_ai.usage.input_tokens': 8,
              'gen_ai.usage.output_tokens': 12,
              'gen_ai.usage.total_tokens': 20,
            }),
            description: 'models gemini-1.5-flash',
            op: 'gen_ai.models',
            origin: 'auto.ai.google_genai',
          }),
        ]),
      );
    })
    .start();
  await runner.makeRequest('get', '/');
  await runner.completed();
});
@@ -0,0 +1,6 @@
{
"name": "worker-name",
"compatibility_date": "2025-06-17",
"main": "index.ts",
"compatibility_flags": ["nodejs_compat"],
}
1 change: 1 addition & 0 deletions packages/cloudflare/src/index.ts
@@ -70,6 +70,7 @@ export {
  // eslint-disable-next-line deprecation/deprecation
  inboundFiltersIntegration,
  instrumentOpenAiClient,
  instrumentGoogleGenAIClient,
  instrumentAnthropicAiClient,
  eventFiltersIntegration,
  linkedErrorsIntegration,
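For reference, a minimal sketch of how a worker might use the newly re-exported `instrumentGoogleGenAIClient` against the real `@google/genai` client rather than the mock (the `GOOGLE_GENAI_API_KEY` binding name is an assumption, not part of this PR):

```typescript
import { GoogleGenAI } from '@google/genai';
import * as Sentry from '@sentry/cloudflare';

interface Env {
  SENTRY_DSN: string;
  GOOGLE_GENAI_API_KEY: string; // hypothetical secret binding
}

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    tracesSampleRate: 1.0,
  }),
  {
    async fetch(_request, env: Env, _ctx) {
      // Wrapping the client is enough: chats.create, sendMessage and
      // models.generateContent calls are then traced as gen_ai.* spans.
      const client = Sentry.instrumentGoogleGenAIClient(
        new GoogleGenAI({ apiKey: env.GOOGLE_GENAI_API_KEY }),
      );

      const response = await client.models.generateContent({
        model: 'gemini-1.5-flash',
        contents: [{ role: 'user', parts: [{ text: 'Hello!' }] }],
      });

      return new Response(JSON.stringify(response));
    },
  },
);
```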
1 change: 1 addition & 0 deletions packages/core/src/index.ts
@@ -134,6 +134,7 @@ export { instrumentAnthropicAiClient } from './utils/anthropic-ai';
export { ANTHROPIC_AI_INTEGRATION_NAME } from './utils/anthropic-ai/constants';
export { instrumentGoogleGenAIClient } from './utils/google-genai';
export { GOOGLE_GENAI_INTEGRATION_NAME } from './utils/google-genai/constants';
export type { GoogleGenAIResponse } from './utils/google-genai/types';
export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types';
export type {
  AnthropicAiClient,
4 changes: 2 additions & 2 deletions packages/core/src/utils/google-genai/index.ts
@@ -292,10 +292,10 @@ function createDeepProxy<T extends object>(target: T, currentPath = '', options:
 *
 * @example
 * ```typescript
- * import { GoogleGenerativeAI } from '@google/genai';
+ * import { GoogleGenAI } from '@google/genai';
 * import { instrumentGoogleGenAIClient } from '@sentry/core';
 *
- * const genAI = new GoogleGenerativeAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY });
+ * const genAI = new GoogleGenAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY });
 * const instrumentedClient = instrumentGoogleGenAIClient(genAI);
 *
 * // Now both chats.create and sendMessage will be instrumented
1 change: 1 addition & 0 deletions packages/vercel-edge/src/index.ts
@@ -70,6 +70,7 @@ export {
  // eslint-disable-next-line deprecation/deprecation
  inboundFiltersIntegration,
  instrumentOpenAiClient,
  instrumentGoogleGenAIClient,
  instrumentAnthropicAiClient,
  eventFiltersIntegration,
  linkedErrorsIntegration,