diff --git a/.changeset/flat-buttons-shave.md b/.changeset/flat-buttons-shave.md new file mode 100644 index 00000000..6cf0a78a --- /dev/null +++ b/.changeset/flat-buttons-shave.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-gemini': patch +--- + +Fix thinking output for Gemini Text adapter diff --git a/.changeset/goofy-cities-push.md b/.changeset/goofy-cities-push.md new file mode 100644 index 00000000..450f6b7f --- /dev/null +++ b/.changeset/goofy-cities-push.md @@ -0,0 +1,6 @@ +--- +'@tanstack/ai-gemini': patch +'@tanstack/ai': patch +--- + +Fix an issue with Gemini thought chunk processing diff --git a/examples/ts-react-chat/src/lib/model-selection.ts b/examples/ts-react-chat/src/lib/model-selection.ts index 4d40ccc7..7512e147 100644 --- a/examples/ts-react-chat/src/lib/model-selection.ts +++ b/examples/ts-react-chat/src/lib/model-selection.ts @@ -37,8 +37,8 @@ export const MODEL_OPTIONS: Array = [ }, { provider: 'gemini', - model: 'gemini-exp-1206', - label: 'Gemini - Exp 1206 (Pro)', + model: 'gemini-2.5-flash', + label: 'Gemini 2.5 - Flash', }, // Ollama diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index a7db5202..68d8e69f 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -90,10 +90,17 @@ export const Route = createFileRoute('/api/tanchat')({ adapter: geminiText( (model || 'gemini-2.5-flash') as 'gemini-2.5-flash', ), + modelOptions: { + thinkingConfig: { + includeThoughts: true, + thinkingBudget: 100, + }, + }, }), grok: () => createChatOptions({ adapter: grokText((model || 'grok-3') as 'grok-3'), + modelOptions: {}, }), ollama: () => createChatOptions({ diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index 97791df5..302409f8 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -20,6 +20,7 @@ import type { GenerateContentResponse, GoogleGenAI, Part, + ThinkingLevel, } from '@google/genai' import type { ContentPart, @@ -212,15 +213,26 @@ export class GeminiTextAdapter< for (const part of parts) { if (part.text) { - accumulatedContent += part.text - yield { - type: 'content', - id: generateId(this.name), - model, - timestamp, - delta: part.text, - content: accumulatedContent, - role: 'assistant', + if (part.thought) { + yield { + type: 'thinking', + content: part.text, + delta: part.text, + id: generateId(this.name), + model, + timestamp, + } + } else { + accumulatedContent += part.text + yield { + type: 'content', + id: generateId(this.name), + model, + timestamp, + delta: part.text, + content: accumulatedContent, + role: 'assistant', + } } } @@ -476,19 +488,29 @@ export class GeminiTextAdapter< }) } - private mapCommonOptionsToGemini(options: TextOptions) { - const providerOpts = options.modelOptions + private mapCommonOptionsToGemini( + options: TextOptions, + ) { + const modelOpts = options.modelOptions + const thinkingConfig = modelOpts?.thinkingConfig const requestOptions: GenerateContentParameters = { model: options.model, contents: this.formatMessages(options.messages), config: { - ...providerOpts, + ...modelOpts, temperature: options.temperature, topP: options.topP, maxOutputTokens: options.maxTokens, + thinkingConfig: thinkingConfig + ? { + ...thinkingConfig, + thinkingLevel: thinkingConfig.thinkingLevel + ?
// The SDK provides this enum; our types expose it as string constants, so here we cast back to the SDK enum + (thinkingConfig.thinkingLevel as ThinkingLevel) + : undefined, + } + : undefined, systemInstruction: options.systemPrompts?.join('\n'), - ...((providerOpts as Record | undefined) - ?.generationConfig as Record | undefined), tools: convertToolsToProviderFormat(options.tools), }, } diff --git a/packages/typescript/ai-gemini/src/model-meta.ts b/packages/typescript/ai-gemini/src/model-meta.ts index 9110c74e..54761a02 100644 --- a/packages/typescript/ai-gemini/src/model-meta.ts +++ b/packages/typescript/ai-gemini/src/model-meta.ts @@ -1,8 +1,9 @@ import type { GeminiCachedContentOptions, - GeminiGenerationConfigOptions, + GeminiCommonConfigOptions, GeminiSafetyOptions, GeminiStructuredOutputOptions, + GeminiThinkingAdvancedOptions, GeminiThinkingOptions, GeminiToolConfigOptions, } from './text/text-provider-options' @@ -77,10 +78,11 @@ const GEMINI_3_PRO = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & - GeminiThinkingOptions + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions > const GEMINI_3_FLASH = { @@ -114,10 +116,11 @@ const GEMINI_3_FLASH = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & - GeminiThinkingOptions + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions > const GEMINI_3_PRO_IMAGE = { @@ -147,10 +150,11 @@ const GEMINI_3_PRO_IMAGE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & - GeminiThinkingOptions + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions > const GEMINI_2_5_PRO = { @@ -185,7 +189,7 @@ const GEMINI_2_5_PRO = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions @@ -212,7 +216,7 @@ const GEMINI_2_5_PRO_TTS = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > @@ -248,7 +252,7 @@ const GEMINI_2_5_FLASH = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions @@ -285,7 +289,7 @@ const GEMINI_2_5_FLASH_PREVIEW = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions @@ -318,7 +322,7 @@ const GEMINI_2_5_FLASH_IMAGE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > /** @@ -377,7 +381,7 @@ const GEMINI_2_5_FLASH_TTS = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > @@ -412,7 +416,7 @@
const GEMINI_2_5_FLASH_LITE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions @@ -448,7 +452,7 @@ const GEMINI_2_5_FLASH_LITE_PREVIEW = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions @@ -484,7 +488,7 @@ const GEMINI_2_FLASH = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions > @@ -515,7 +519,7 @@ const GEMINI_2_FLASH_IMAGE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > /* @@ -579,7 +583,7 @@ const GEMINI_2_FLASH_LITE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions > @@ -603,7 +607,7 @@ const IMAGEN_4_GENERATE = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > @@ -626,7 +630,7 @@ const IMAGEN_4_GENERATE_ULTRA = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > @@ -649,7 +653,7 @@ const IMAGEN_4_GENERATE_FAST = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > @@ -671,7 +675,7 @@ const IMAGEN_3 = { } as const satisfies ModelMeta< GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions > /** @@ -909,55 +913,57 @@ export type GeminiChatModelProviderOptionsByName = { // Models with thinking and structured output support [GEMINI_3_PRO.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & - GeminiThinkingOptions + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions [GEMINI_3_FLASH.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & - GeminiThinkingOptions + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions [GEMINI_2_5_PRO.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions [GEMINI_2_5_FLASH.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions [GEMINI_2_5_FLASH_PREVIEW.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions [GEMINI_2_5_FLASH_LITE.name]: 
GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions [GEMINI_2_5_FLASH_LITE_PREVIEW.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions & GeminiThinkingOptions // Models with structured output but no thinking support [GEMINI_2_FLASH.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions [GEMINI_2_FLASH_LITE.name]: GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & + GeminiCommonConfigOptions & GeminiCachedContentOptions & GeminiStructuredOutputOptions } diff --git a/packages/typescript/ai-gemini/src/text/text-provider-options.ts b/packages/typescript/ai-gemini/src/text/text-provider-options.ts index fe9373b8..4c53ec2a 100644 --- a/packages/typescript/ai-gemini/src/text/text-provider-options.ts +++ b/packages/typescript/ai-gemini/src/text/text-provider-options.ts @@ -22,40 +22,39 @@ This will be enforced on the GenerateContentRequest.contents and GenerateContent safetySettings?: Array } -export interface GeminiGenerationConfigOptions { +export interface GeminiCommonConfigOptions { /** * Configuration options for model generation and outputs. */ - generationConfig?: { - /** - * The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop_sequence. The stop sequence will not be included as part of the response. - */ - stopSequences?: Array<string> - /** + /** + * The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop_sequence. The stop sequence will not be included as part of the response. + */ + stopSequences?: Array<string> + /** * The requested modalities of the response. Represents the set of modalities that the model can return, and should be expected in the response. This is an exact match to the modalities of the response. A model may have multiple combinations of supported modalities. If the requested modalities do not match any of the supported combinations, an error will be returned. */ - responseModalities?: Array< - 'MODALITY_UNSPECIFIED' | 'TEXT' | 'IMAGE' | 'AUDIO' - > - /** - * Number of generated responses to return. If unset, this will default to 1. Please note that this doesn't work for previous generation models (Gemini 1.0 family) - */ - candidateCount?: number - /** + responseModalities?: Array< + 'MODALITY_UNSPECIFIED' | 'TEXT' | 'IMAGE' | 'AUDIO' + > + /** + * Number of generated responses to return. If unset, this will default to 1. Please note that this doesn't work for previous generation models (Gemini 1.0 family) + */ + candidateCount?: number + /** * The maximum number of tokens to consider when sampling. Gemini models use Top-p (nucleus) sampling or a combination of Top-k and nucleus sampling. Top-k sampling considers the set of topK most probable tokens. Models running with nucleus sampling don't allow topK setting. Note: The default value varies by Model and is specified by the Model.top_p attribute returned from the getModel function. An empty topK attribute indicates that the model doesn't apply top-k sampling and doesn't allow setting topK on requests.
*/ - topK?: number - /** - * Seed used in decoding. If not set, the request uses a randomly generated seed. - */ - seed?: number - /** + topK?: number + /** + * Seed used in decoding. If not set, the request uses a randomly generated seed. + */ + seed?: number + /** * Presence penalty applied to the next token's logprobs if the token has already been seen in the response. This penalty is binary on/off and not dependent on the number of times the token is used (after the first). Use frequencyPenalty for a penalty that increases with each use. @@ -64,107 +63,105 @@ A positive penalty will discourage the use of tokens that have already been used A negative penalty will encourage the use of tokens that have already been used in the response, decreasing the vocabulary. */ - presencePenalty?: number - /** + presencePenalty?: number + /** * Frequency penalty applied to the next token's logprobs, multiplied by the number of times each token has been seen in the response so far. A positive penalty will discourage the use of tokens that have already been used, proportional to the number of times the token has been used: The more a token is used, the more difficult it is for the model to use that token again, increasing the vocabulary of responses. Caution: A negative penalty will encourage the model to reuse tokens proportional to the number of times the token has been used. Small negative values will reduce the vocabulary of a response. Larger negative values will cause the model to start repeating a common token until it hits the maxOutputTokens limit. */ - frequencyPenalty?: number - /** - * If true, export the logprobs results in response. - */ - responseLogprobs?: boolean + frequencyPenalty?: number + /** + * If true, export the logprobs results in response. + */ + responseLogprobs?: boolean - /** - * Only valid if responseLogprobs=True. This sets the number of top logprobs to return at each decoding step in the Candidate.logprobs_result. The number must be in the range of [0, 20]. - */ - logprobs?: number + /** + * Only valid if responseLogprobs=True. This sets the number of top logprobs to return at each decoding step in the Candidate.logprobs_result. The number must be in the range of [0, 20]. + */ + logprobs?: number - /** - * Enables enhanced civic answers. It may not be available for all models. - */ - enableEnhancedCivicAnswers?: boolean + /** + * Enables enhanced civic answers. It may not be available for all models. + */ + enableEnhancedCivicAnswers?: boolean - /** - * The speech generation config. - */ - speechConfig?: { - voiceConfig: { - prebuiltVoiceConfig: { - voiceName: string - } + /** + * The speech generation config. + */ + speechConfig?: { + voiceConfig: { + prebuiltVoiceConfig: { + voiceName: string } + } - multiSpeakerVoiceConfig?: { - speakerVoiceConfigs?: Array<{ - speaker: string - voiceConfig: { - prebuiltVoiceConfig: { - voiceName: string - } + multiSpeakerVoiceConfig?: { + speakerVoiceConfigs?: Array<{ + speaker: string + voiceConfig: { + prebuiltVoiceConfig: { + voiceName: string } - }> - } - /** + } + }> + } + /** * Language code (in BCP 47 format, e.g. "en-US") for speech synthesis. Valid values are: de-DE, en-AU, en-GB, en-IN, en-US, es-US, fr-FR, hi-IN, pt-BR, ar-XA, es-ES, fr-CA, id-ID, it-IT, ja-JP, tr-TR, vi-VN, bn-IN, gu-IN, kn-IN, ml-IN, mr-IN, ta-IN, te-IN, nl-NL, ko-KR, cmn-CN, pl-PL, ru-RU, and th-TH.
*/ - languageCode?: - | 'de-DE' - | 'en-AU' - | 'en-GB' - | 'en-IN' - | 'en-US' - | 'es-US' - | 'fr-FR' - | 'hi-IN' - | 'pt-BR' - | 'ar-XA' - | 'es-ES' - | 'fr-CA' - | 'id-ID' - | 'it-IT' - | 'ja-JP' - | 'tr-TR' - | 'vi-VN' - | 'bn-IN' - | 'gu-IN' - | 'kn-IN' - | 'ml-IN' - | 'mr-IN' - | 'ta-IN' - | 'te-IN' - | 'nl-NL' - | 'ko-KR' - | 'cmn-CN' - | 'pl-PL' - | 'ru-RU' - | 'th-TH' - } - /** - * Config for image generation. An error will be returned if this field is set for models that don't support these config options. - */ - imageConfig?: { - aspectRatio?: - | '1:1' - | '2:3' - | '3:2' - | '3:4' - | '4:3' - | '9:16' - | '16:9' - | '21:9' - } - /** - * If specified, the media resolution specified will be used. - */ - mediaResolution?: MediaResolution - } & GeminiThinkingOptions & - GeminiStructuredOutputOptions + languageCode?: + | 'de-DE' + | 'en-AU' + | 'en-GB' + | 'en-IN' + | 'en-US' + | 'es-US' + | 'fr-FR' + | 'hi-IN' + | 'pt-BR' + | 'ar-XA' + | 'es-ES' + | 'fr-CA' + | 'id-ID' + | 'it-IT' + | 'ja-JP' + | 'tr-TR' + | 'vi-VN' + | 'bn-IN' + | 'gu-IN' + | 'kn-IN' + | 'ml-IN' + | 'mr-IN' + | 'ta-IN' + | 'te-IN' + | 'nl-NL' + | 'ko-KR' + | 'cmn-CN' + | 'pl-PL' + | 'ru-RU' + | 'th-TH' + } + /** + * Config for image generation. An error will be returned if this field is set for models that don't support these config options. + */ + imageConfig?: { + aspectRatio?: - | '1:1' | '2:3' | '3:2' | '3:4' | '4:3' | '9:16' | '16:9' | '21:9' + } + /** + * If specified, the media resolution specified will be used. + */ + mediaResolution?: MediaResolution } export interface GeminiCachedContentOptions { @@ -232,15 +229,26 @@ export interface GeminiThinkingOptions { /** * The number of thought tokens that the model should generate. */ - thinkingBudget: number + thinkingBudget?: number + } +} + +export interface GeminiThinkingAdvancedOptions { + /** + * Config for thinking features. An error will be returned if this field is set for models that don't support thinking. + */ + thinkingConfig?: { /** * The level of thinking the model should use when generating thought tokens. */ - thinkingLevel?: ThinkingLevel + thinkingLevel?: keyof typeof ThinkingLevel } } export type ExternalTextProviderOptions = GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & - GeminiCachedContentOptions + GeminiCommonConfigOptions & + GeminiCachedContentOptions & + GeminiThinkingOptions & + GeminiThinkingAdvancedOptions & + GeminiStructuredOutputOptions diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index 8c3e186c..1f000171 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -101,7 +101,7 @@ describe('GeminiAdapter through AI', () => { adapter, messages: [{ role: 'user', content: 'How is the weather in Madrid?'
}], modelOptions: { - generationConfig: { topK: 9 }, + topK: 9, }, temperature: 0.4, topP: 0.8, @@ -227,49 +227,28 @@ describe('GeminiAdapter through AI', () => { expect(config.maxOutputTokens).toBe(512) expect(config.cachedContent).toBe(providerOptions.cachedContent) expect(config.safetySettings).toEqual(providerOptions.safetySettings) - expect(config.generationConfig).toEqual(providerOptions.generationConfig) - expect(config.stopSequences).toEqual( - providerOptions.generationConfig?.stopSequences, - ) - expect(config.responseMimeType).toBe( - providerOptions.generationConfig?.responseMimeType, - ) - expect(config.responseSchema).toEqual( - providerOptions.generationConfig?.responseSchema, - ) + expect(config.stopSequences).toEqual(providerOptions?.stopSequences) + expect(config.responseMimeType).toBe(providerOptions?.responseMimeType) + expect(config.responseSchema).toEqual(providerOptions?.responseSchema) expect(config.responseJsonSchema).toEqual( - providerOptions.generationConfig?.responseJsonSchema, + providerOptions?.responseJsonSchema, ) expect(config.responseModalities).toEqual( - providerOptions.generationConfig?.responseModalities, - ) - expect(config.candidateCount).toBe( - providerOptions.generationConfig?.candidateCount, - ) - expect(config.topK).toBe(providerOptions.generationConfig?.topK) - expect(config.seed).toBe(providerOptions.generationConfig?.seed) - expect(config.presencePenalty).toBe( - providerOptions.generationConfig?.presencePenalty, + providerOptions?.responseModalities, ) - expect(config.frequencyPenalty).toBe( - providerOptions.generationConfig?.frequencyPenalty, - ) - expect(config.responseLogprobs).toBe( - providerOptions.generationConfig?.responseLogprobs, - ) - expect(config.logprobs).toBe(providerOptions.generationConfig?.logprobs) + expect(config.candidateCount).toBe(providerOptions?.candidateCount) + expect(config.topK).toBe(providerOptions?.topK) + expect(config.seed).toBe(providerOptions?.seed) + expect(config.presencePenalty).toBe(providerOptions?.presencePenalty) + expect(config.frequencyPenalty).toBe(providerOptions?.frequencyPenalty) + expect(config.responseLogprobs).toBe(providerOptions?.responseLogprobs) + expect(config.logprobs).toBe(providerOptions?.logprobs) expect(config.enableEnhancedCivicAnswers).toBe( - providerOptions.generationConfig?.enableEnhancedCivicAnswers, - ) - expect(config.speechConfig).toEqual( - providerOptions.generationConfig?.speechConfig, - ) - expect(config.thinkingConfig).toEqual( - providerOptions.generationConfig?.thinkingConfig, - ) - expect(config.imageConfig).toEqual( - providerOptions.generationConfig?.imageConfig, + providerOptions?.enableEnhancedCivicAnswers, ) + expect(config.speechConfig).toEqual(providerOptions?.speechConfig) + expect(config.thinkingConfig).toEqual(providerOptions?.thinkingConfig) + expect(config.imageConfig).toEqual(providerOptions?.imageConfig) }) it('streams chat chunks using mapped provider config', async () => { @@ -308,7 +287,7 @@ describe('GeminiAdapter through AI', () => { adapter, messages: [{ role: 'user', content: 'Tell me a joke' }], modelOptions: { - generationConfig: { topK: 3 }, + topK: 3, }, temperature: 0.2, })) { diff --git a/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts b/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts index f5ce029f..0f68bf06 100644 --- a/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts +++ b/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts @@ -55,7 +55,6 @@ async function 
testToolWithEmptyObjectSchema() { console.log(' Tool name:', getGuitarsTool.name) console.log(' Input schema:', getGuitarsTool.inputSchema.toString()) console.log(' User message:', messages[0].content) - console.log() try { console.log('📥 Streaming response...\n') diff --git a/packages/typescript/ai/src/activities/chat/stream/processor.ts b/packages/typescript/ai/src/activities/chat/stream/processor.ts index 3c827eab..8873d124 100644 --- a/packages/typescript/ai/src/activities/chat/stream/processor.ts +++ b/packages/typescript/ai/src/activities/chat/stream/processor.ts @@ -732,7 +732,7 @@ export class StreamProcessor { let nextThinking = previous // Prefer delta over content - if (chunk.delta !== '') { + if (chunk.delta && chunk.delta !== '') { nextThinking = previous + chunk.delta } else if (chunk.content !== '') { if (chunk.content.startsWith(previous)) {
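Taken together, these changes surface Gemini `thought` parts as dedicated `thinking` chunks and move `thinkingConfig` to the top level of `modelOptions` (the nested `generationConfig` object is gone). Below is a minimal consumer sketch: the `streamChat` entry point and the exact import paths are assumptions for illustration, while `createChatOptions`, `geminiText`, `thinkingConfig`, and the chunk shapes follow the diff above.

```ts
// Sketch only: `streamChat` is a hypothetical entry point; the adapter,
// options factory, and chunk shapes mirror the changes in this diff.
import { createChatOptions, streamChat } from '@tanstack/ai' // import path assumed
import { geminiText } from '@tanstack/ai-gemini'

const options = createChatOptions({
  adapter: geminiText('gemini-2.5-flash'),
  modelOptions: {
    // thinkingConfig is now a top-level modelOptions field
    // (previously nested under `generationConfig`).
    thinkingConfig: {
      includeThoughts: true, // ask Gemini to stream its thought parts
      thinkingBudget: 1024, // max thought tokens; optional as of this change
    },
  },
})

let thinking = ''
let answer = ''

for await (const chunk of streamChat({
  ...options,
  messages: [{ role: 'user', content: 'Why is the sky blue?' }],
})) {
  if (chunk.type === 'thinking') {
    // New in this change: parts flagged with `thought` arrive as `thinking`
    // chunks instead of being folded into the assistant content.
    if (chunk.delta) thinking += chunk.delta
  } else if (chunk.type === 'content') {
    answer = chunk.content // the adapter accumulates the full assistant text
  }
}
```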