Skip to content

Commit 3ec2afb

Browse files
improve: adds better error handling (#12)
1 parent f964c6d commit 3ec2afb

File tree

5 files changed

+271
-50
lines changed

5 files changed

+271
-50
lines changed

README.md

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,35 @@ Task {
236236

237237
To learn more about structured outputs, check out the [OpenAI documentation](https://platform.openai.com/docs/guides/structured-outputs/introduction).
238238

239+
### Error Handling
240+
241+
`LLMChatOpenAI` provides structured error handling through the `LLMChatOpenAIError` enum. This enum contains three cases that represent different types of errors you might encounter:
242+
243+
```swift
244+
let messages = [
245+
ChatMessage(role: .system, content: "You are a helpful assistant."),
246+
ChatMessage(role: .user, content: "What is the capital of Indonesia?")
247+
]
248+
249+
do {
250+
let completion = try await chat.send(model: "gpt-4o", messages: messages)
251+
252+
print(completion.choices.first?.message.content ?? "No response")
253+
} catch let error as LLMChatOpenAIError {
254+
switch error {
255+
case .serverError(let message):
256+
// Handle server-side errors (e.g., invalid API key, rate limits)
257+
print("Server Error: \(message)")
258+
case .networkError(let error):
259+
// Handle network-related errors (e.g., no internet connection)
260+
print("Network Error: \(error.localizedDescription)")
261+
case .badServerResponse:
262+
// Handle invalid server responses
263+
print("Invalid response received from server")
264+
}
265+
}
266+
```
267+
239268
## Related Packages
240269

241270
- [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever)

Sources/LLMChatOpenAI/Documentation.docc/Documentation.md

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,35 @@ Task {
207207

208208
To learn more about structured outputs, check out the [OpenAI documentation](https://platform.openai.com/docs/guides/structured-outputs/introduction).
209209

210+
### Error Handling
211+
212+
``LLMChatOpenAI`` provides structured error handling through the ``LLMChatOpenAIError`` enum. This enum contains three cases that represent different types of errors you might encounter:
213+
214+
```swift
215+
let messages = [
216+
ChatMessage(role: .system, content: "You are a helpful assistant."),
217+
ChatMessage(role: .user, content: "What is the capital of Indonesia?")
218+
]
219+
220+
do {
221+
let completion = try await chat.send(model: "gpt-4o", messages: messages)
222+
223+
print(completion.choices.first?.message.content ?? "No response")
224+
} catch let error as LLMChatOpenAIError {
225+
switch error {
226+
case .serverError(let message):
227+
// Handle server-side errors (e.g., invalid API key, rate limits)
228+
print("Server Error: \(message)")
229+
case .networkError(let error):
230+
// Handle network-related errors (e.g., no internet connection)
231+
print("Network Error: \(error.localizedDescription)")
232+
case .badServerResponse:
233+
// Handle invalid server responses
234+
print("Invalid response received from server")
235+
}
236+
}
237+
```
238+
210239
## Related Packages
211240

212241
- [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever)

Sources/LLMChatOpenAI/LLMChatOpenAI.swift

Lines changed: 78 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,23 @@ public struct LLMChatOpenAI {
3030
self.endpoint = endpoint ?? URL(string: "https://api.openai.com/v1/chat/completions")!
3131
self.headers = headers
3232
}
33+
34+
/// The complete header set for outgoing requests: the default JSON content
/// type and bearer authorization headers, overlaid with any caller-supplied
/// headers. On a key collision the caller-supplied value wins.
var allHeaders: [String: String] {
    var combined: [String: String] = [
        "Content-Type": "application/json",
        "Authorization": "Bearer \(apiKey)"
    ]
    
    for (key, value) in headers ?? [:] {
        combined[key] = value
    }
    
    return combined
}
3346
}
3447

35-
extension LLMChatOpenAI {
48+
// MARK: - Send
49+
public extension LLMChatOpenAI {
3650
/// Sends a chat completion request.
3751
///
3852
/// - Parameters:
@@ -41,7 +55,7 @@ extension LLMChatOpenAI {
4155
/// - options: Optional ``ChatOptions`` that customize the completion request.
4256
///
4357
/// - Returns: A ``ChatCompletion`` object that contains the API's response.
44-
public func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
58+
func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
4559
let body = RequestBody(stream: false, model: model, messages: messages, options: options)
4660

4761
return try await performRequest(with: body)
@@ -57,7 +71,7 @@ extension LLMChatOpenAI {
5771
/// - Returns: A ``ChatCompletion`` object that contains the API's response.
5872
///
5973
/// - Note: This method enables fallback functionality when using OpenRouter. For other providers, only the first model in the array will be used.
60-
public func send(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
74+
func send(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
6175
let body: RequestBody
6276

6377
if isSupportFallbackModel {
@@ -68,7 +82,10 @@ extension LLMChatOpenAI {
6882

6983
return try await performRequest(with: body)
7084
}
71-
85+
}
86+
87+
// MARK: - Stream
88+
public extension LLMChatOpenAI {
7289
/// Streams a chat completion request.
7390
///
7491
/// - Parameters:
@@ -77,7 +94,7 @@ extension LLMChatOpenAI {
7794
/// - options: Optional ``ChatOptions`` that customize the completion request.
7895
///
7996
/// - Returns: An `AsyncThrowingStream` of ``ChatCompletionChunk`` objects.
80-
public func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
97+
func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
8198
let body = RequestBody(stream: true, model: model, messages: messages, options: options)
8299

83100
return performStreamRequest(with: body)
@@ -93,7 +110,7 @@ extension LLMChatOpenAI {
93110
/// - Returns: An `AsyncThrowingStream` of ``ChatCompletionChunk`` objects.
94111
///
95112
/// - Note: This method enables fallback functionality when using OpenRouter. For other providers, only the first model in the array will be used.
96-
public func stream(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
113+
func stream(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
97114
let body: RequestBody
98115

99116
if isSupportFallbackModel {
@@ -104,22 +121,58 @@ extension LLMChatOpenAI {
104121

105122
return performStreamRequest(with: body)
106123
}
107-
108-
private func performRequest(with body: RequestBody) async throws -> ChatCompletion {
109-
let request = try createRequest(for: endpoint, with: body)
110-
let (data, response) = try await URLSession.shared.data(for: request)
111-
try validateHTTPResponse(response)
124+
}
125+
126+
// MARK: - Helpers
127+
private extension LLMChatOpenAI {
128+
/// Builds a POST request for `url`, attaching the merged headers and the
/// JSON-encoded request body.
///
/// - Parameters:
///   - url: The endpoint to send the request to.
///   - body: The request payload to encode as JSON.
/// - Throws: Any error thrown by `JSONEncoder` while encoding `body`.
/// - Returns: A fully configured `URLRequest` ready for `URLSession`.
func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest {
    var urlRequest = URLRequest(url: url)
    urlRequest.httpMethod = "POST"
    urlRequest.allHTTPHeaderFields = allHeaders
    urlRequest.httpBody = try JSONEncoder().encode(body)
    
    return urlRequest
}
136+
137+
/// Executes a non-streaming chat completion request and decodes the response.
///
/// Error-mapping strategy:
/// - A body that decodes as ``ChatCompletionError`` is surfaced as
///   `.serverError`. This is checked before the status code — presumably
///   because some OpenAI-compatible providers return error payloads with a
///   2xx status (NOTE(review): confirm this rationale against provider behavior).
/// - A non-2xx status without a decodable error payload becomes
///   `.badServerResponse`.
/// - Anything else is wrapped in `.networkError`.
///
/// - Parameter body: The encoded request payload.
/// - Throws: ``LLMChatOpenAIError`` for all failure modes.
/// - Returns: The decoded ``ChatCompletion`` on success.
func performRequest(with body: RequestBody) async throws -> ChatCompletion {
    do {
        let request = try createRequest(for: endpoint, with: body)
        let (data, response) = try await URLSession.shared.data(for: request)
        
        // Prefer the server-provided message when the body parses as an error payload.
        if let errorResponse = try? JSONDecoder().decode(ChatCompletionError.self, from: data) {
            throw LLMChatOpenAIError.serverError(errorResponse.error.message)
        }
        
        guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
            throw LLMChatOpenAIError.badServerResponse
        }
        
        return try JSONDecoder().decode(ChatCompletion.self, from: data)
    } catch let error as LLMChatOpenAIError {
        // Already classified — rethrow unchanged so callers see the specific case.
        throw error
    } catch {
        // NOTE(review): this also wraps JSONEncoder/JSONDecoder failures as
        // .networkError, not only transport errors — confirm that is intended.
        throw LLMChatOpenAIError.networkError(error)
    }
}
115157

116-
private func performStreamRequest(with body: RequestBody) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
158+
func performStreamRequest(with body: RequestBody) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
117159
AsyncThrowingStream { continuation in
118160
Task {
119161
do {
120162
let request = try createRequest(for: endpoint, with: body)
121163
let (bytes, response) = try await URLSession.shared.bytes(for: request)
122-
try validateHTTPResponse(response)
164+
165+
guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
166+
for try await line in bytes.lines {
167+
if let data = line.data(using: .utf8), let errorResponse = try? JSONDecoder().decode(ChatCompletionError.self, from: data) {
168+
throw LLMChatOpenAIError.serverError(errorResponse.error.message)
169+
}
170+
171+
break
172+
}
173+
174+
throw LLMChatOpenAIError.badServerResponse
175+
}
123176

124177
for try await line in bytes.lines {
125178
if line.hasPrefix("data: ") {
@@ -138,45 +191,16 @@ extension LLMChatOpenAI {
138191
}
139192

140193
continuation.finish()
141-
} catch {
194+
} catch let error as LLMChatOpenAIError {
142195
continuation.finish(throwing: error)
196+
} catch {
197+
continuation.finish(throwing: LLMChatOpenAIError.networkError(error))
143198
}
144199
}
145200
}
146201
}
147202
}
148203

149-
// MARK: - Helper Methods
150-
private extension LLMChatOpenAI {
151-
var allHeaders: [String: String] {
152-
var defaultHeaders = [
153-
"Content-Type": "application/json",
154-
"Authorization": "Bearer \(apiKey)"
155-
]
156-
157-
if let headers {
158-
defaultHeaders.merge(headers) { _, new in new }
159-
}
160-
161-
return defaultHeaders
162-
}
163-
164-
func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest {
165-
var request = URLRequest(url: url)
166-
request.httpMethod = "POST"
167-
request.httpBody = try JSONEncoder().encode(body)
168-
request.allHTTPHeaderFields = allHeaders
169-
170-
return request
171-
}
172-
173-
func validateHTTPResponse(_ response: URLResponse) throws {
174-
guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
175-
throw URLError(.badServerResponse)
176-
}
177-
}
178-
}
179-
180204
// MARK: - Supporting Types
181205
private extension LLMChatOpenAI {
182206
struct RequestBody: Encodable {
@@ -228,4 +252,12 @@ private extension LLMChatOpenAI {
228252
case streamOptions = "stream_options"
229253
}
230254
}
255+
256+
/// The error payload shape returned by OpenAI-compatible endpoints,
/// e.g. `{"error": {"message": "..."}}`.
struct ChatCompletionError: Codable {
    /// The nested `error` object from the response body.
    let error: Detail
    
    /// The error details; only the human-readable message is decoded.
    /// Named `Detail` (not `Error`) to avoid shadowing `Swift.Error`,
    /// and the redundant `public` modifier is dropped — members of a type
    /// declared in a `private extension` cannot be public.
    struct Detail: Codable {
        let message: String
        
        private enum CodingKeys: String, CodingKey {
            case message
        }
    }
}
231263
}
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
//
2+
// LLMChatOpenAIError.swift
3+
// LLMChatOpenAI
4+
//
5+
// Created by Kevin Hermawan on 10/27/24.
6+
//
7+
8+
import Foundation
9+
10+
/// Errors that can occur while performing a chat completion request.
public enum LLMChatOpenAIError: LocalizedError {
    /// The server returned an error response.
    ///
    /// - Parameter message: The error message from the server.
    case serverError(String)
    
    /// A network-level failure occurred.
    ///
    /// - Parameter error: The underlying network error.
    case networkError(Error)
    
    /// The server returned a response that could not be interpreted.
    case badServerResponse
    
    /// A human-readable description of the error, suitable for display.
    public var errorDescription: String? {
        switch self {
        case .serverError(let message):
            return message
        case .networkError(let underlying):
            return underlying.localizedDescription
        case .badServerResponse:
            return "Invalid response received from server"
        }
    }
}

0 commit comments

Comments
 (0)