diff --git a/src/Extensions/OpenAI/AzureOpenAIChatClient.cs b/src/Extensions/OpenAI/AzureOpenAIChatClient.cs
index 8f215b8..d1c615b 100644
--- a/src/Extensions/OpenAI/AzureOpenAIChatClient.cs
+++ b/src/Extensions/OpenAI/AzureOpenAIChatClient.cs
@@ -45,11 +45,11 @@ public AzureOpenAIChatClient(Uri endpoint, ApiKeyCredential credential, string m
    /// <inheritdoc/>
    public Task<ChatResponse> GetResponseAsync(IEnumerable<ChatMessage> messages, ChatOptions? options = null, CancellationToken cancellation = default)
- => GetChatClient(options?.ModelId ?? modelId).GetResponseAsync(messages, options.SetResponseOptions(), cancellation);
+ => GetChatClient(options?.ModelId ?? modelId).GetResponseAsync(messages, options.ApplyExtensions(), cancellation);
    /// <inheritdoc/>
    public IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(IEnumerable<ChatMessage> messages, ChatOptions? options = null, CancellationToken cancellation = default)
- => GetChatClient(options?.ModelId ?? modelId).GetStreamingResponseAsync(messages, options.SetResponseOptions(), cancellation);
+ => GetChatClient(options?.ModelId ?? modelId).GetStreamingResponseAsync(messages, options.ApplyExtensions(), cancellation);
IChatClient GetChatClient(string modelId) => clients.GetOrAdd(modelId, model
=> new PipelineClient(pipeline, endpoint, options).GetOpenAIResponseClient(modelId).AsIChatClient());
diff --git a/src/Extensions/OpenAI/OpenAIChatClient.cs b/src/Extensions/OpenAI/OpenAIChatClient.cs
index cb43665..33a08e0 100644
--- a/src/Extensions/OpenAI/OpenAIChatClient.cs
+++ b/src/Extensions/OpenAI/OpenAIChatClient.cs
@@ -37,11 +37,11 @@ public OpenAIChatClient(string apiKey, string modelId, OpenAIClientOptions? opti
    /// <inheritdoc/>
    public Task<ChatResponse> GetResponseAsync(IEnumerable<ChatMessage> messages, ChatOptions? options = null, CancellationToken cancellation = default)
- => GetChatClient(options?.ModelId ?? modelId).GetResponseAsync(messages, options.SetResponseOptions(), cancellation);
+ => GetChatClient(options?.ModelId ?? modelId).GetResponseAsync(messages, options.ApplyExtensions(), cancellation);
    /// <inheritdoc/>
    public IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(IEnumerable<ChatMessage> messages, ChatOptions? options = null, CancellationToken cancellation = default)
- => GetChatClient(options?.ModelId ?? modelId).GetStreamingResponseAsync(messages, options.SetResponseOptions(), cancellation);
+ => GetChatClient(options?.ModelId ?? modelId).GetStreamingResponseAsync(messages, options.ApplyExtensions(), cancellation);
IChatClient GetChatClient(string modelId) => clients.GetOrAdd(modelId, model
=> new PipelineClient(pipeline, options).GetOpenAIResponseClient(modelId).AsIChatClient());
diff --git a/src/Extensions/OpenAI/OpenAIExtensions.cs b/src/Extensions/OpenAI/OpenAIExtensions.cs
index f4e4313..13e7c56 100644
--- a/src/Extensions/OpenAI/OpenAIExtensions.cs
+++ b/src/Extensions/OpenAI/OpenAIExtensions.cs
@@ -5,9 +5,23 @@
namespace Devlooped.Extensions.AI.OpenAI;
-static class OpenAIExtensions
+/// <summary>
+/// Allows applying extension properties to the <see cref="ChatOptions"/> when using
+/// them with an OpenAI client.
+/// </summary>
+public static class OpenAIExtensions
{
- public static ChatOptions? SetResponseOptions(this ChatOptions? options)
+    /// <summary>
+    /// Applies the extension properties to the <see cref="ChatOptions"/> so that
+    /// the underlying OpenAI client can properly forward them to the endpoint.
+    /// </summary>
+    /// <remarks>
+    /// Only use this if you are not using <see cref="OpenAIChatClient"/>, which already applies
+    /// extensions before sending requests.
+    /// </remarks>
+    /// <returns>An options with the right <see cref="ChatOptions.RawRepresentationFactory"/> replaced
+    /// so it can forward extensions to the underlying OpenAI API.</returns>
+ public static ChatOptions? ApplyExtensions(this ChatOptions? options)
{
if (options is null)
return null;
diff --git a/src/Extensions/ReasoningEffort.cs b/src/Extensions/ReasoningEffort.cs
index 476aec2..09c7cd9 100644
--- a/src/Extensions/ReasoningEffort.cs
+++ b/src/Extensions/ReasoningEffort.cs
@@ -5,6 +5,11 @@
///
public enum ReasoningEffort
{
+    /// <summary>
+    /// Lowest latency by indicating no reasoning tokens should be spent at all. Support depends on the model.
+    /// </summary>
+    /// <seealso cref="Minimal"/>
+ None,
///
/// Minimal reasoning effort, which may result in faster responses. Support depends on the model.
///
diff --git a/src/Tests/OpenAITests.cs b/src/Tests/OpenAITests.cs
index 3263497..9fd1880 100644
--- a/src/Tests/OpenAITests.cs
+++ b/src/Tests/OpenAITests.cs
@@ -121,6 +121,7 @@ public async Task GPT5_ThinksFast()
}
[SecretsTheory("OPENAI_API_KEY")]
+ [InlineData(ReasoningEffort.None)]
[InlineData(ReasoningEffort.Minimal)]
[InlineData(ReasoningEffort.Low)]
[InlineData(ReasoningEffort.Medium)]
@@ -135,7 +136,7 @@ public async Task GPT5_ThinkingTime(ReasoningEffort effort)
var requests = new List();
- var chat = new OpenAIChatClient(Configuration["OPENAI_API_KEY"]!, "gpt-5-nano",
+ var chat = new OpenAIChatClient(Configuration["OPENAI_API_KEY"]!, "gpt-5.2",
OpenAIClientOptions.Observable(requests.Add).WriteTo(output));
var options = new ChatOptions
@@ -166,6 +167,45 @@ public async Task GPT5_ThinkingTime(ReasoningEffort effort)
output.WriteLine($"Effort: {effort}, Time: {watch.ElapsedMilliseconds}ms, Tokens: {response.Usage?.TotalTokenCount}");
}
+ [SecretsFact("OPENAI_API_KEY")]
+ public async Task GPT5_NoReasoningTokens()
+ {
+ var requests = new List();
+
+ //var chat = new OpenAIChatClient(Configuration["OPENAI_API_KEY"]!, "gpt-4o",
+ // OpenAIClientOptions.Observable(requests.Add).WriteTo(output));
+
+ var chat = new OpenAIClient(new ApiKeyCredential(Configuration["OPENAI_API_KEY"]!),
+ OpenAIClientOptions.Observable(requests.Add).WriteTo(output))
+ .GetOpenAIResponseClient("gpt-4o")
+ .AsIChatClient();
+
+ var reasoned = await chat.GetResponseAsync(
+ "How much gold would it take to coat the Statue of Liberty in a 1mm layer?",
+ new ChatOptions
+ {
+ ModelId = "gpt-5.1",
+ ReasoningEffort = ReasoningEffort.Low
+ }.ApplyExtensions());
+
+ Assert.StartsWith("gpt-5.1", reasoned.ModelId);
+ Assert.NotNull(reasoned.Usage?.AdditionalCounts);
+ Assert.True(reasoned.Usage.AdditionalCounts.ContainsKey("OutputTokenDetails.ReasoningTokenCount"));
+ Assert.True(reasoned.Usage.AdditionalCounts["OutputTokenDetails.ReasoningTokenCount"] > 0);
+
+ var nonreasoned = await chat.GetResponseAsync(
+ "How much gold would it take to coat the Statue of Liberty in a 1mm layer?",
+ new ChatOptions
+ {
+ ModelId = "gpt-5.1",
+ ReasoningEffort = ReasoningEffort.None
+ }.ApplyExtensions());
+
+ Assert.NotNull(nonreasoned.Usage?.AdditionalCounts);
+ Assert.True(nonreasoned.Usage.AdditionalCounts.ContainsKey("OutputTokenDetails.ReasoningTokenCount"));
+ Assert.True(nonreasoned.Usage.AdditionalCounts["OutputTokenDetails.ReasoningTokenCount"] == 0);
+ }
+
[SecretsTheory("OPENAI_API_KEY")]
[InlineData(Verbosity.Low)]
[InlineData(Verbosity.Medium)]