diff --git a/FirebaseAI/CHANGELOG.md b/FirebaseAI/CHANGELOG.md index 06f5f30908d..4ee5c21b9a6 100644 --- a/FirebaseAI/CHANGELOG.md +++ b/FirebaseAI/CHANGELOG.md @@ -1,3 +1,6 @@ +# Unreleased +- [fixed] Fixed various links in the Live API doc comments not mapping correctly. + # 12.4.0 - [feature] Added support for the URL context tool, which allows the model to access content from provided public web URLs to inform and enhance its responses. (#15221) diff --git a/FirebaseAI/Sources/FirebaseAI.swift b/FirebaseAI/Sources/FirebaseAI.swift index f9ff5ea0424..354c16b79ab 100644 --- a/FirebaseAI/Sources/FirebaseAI.swift +++ b/FirebaseAI/Sources/FirebaseAI.swift @@ -63,7 +63,7 @@ public final class FirebaseAI: Sendable { /// guidance on choosing an appropriate model for your use case. /// /// - Parameters: - /// - modelName: The name of the model to use, for example `"gemini-1.5-flash"`; see + /// - modelName: The name of the model to use; see /// [available model names /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a /// list of supported model names. @@ -106,12 +106,11 @@ public final class FirebaseAI: Sendable { /// Initializes an ``ImagenModel`` with the given parameters. /// - /// > Important: Only Imagen 3 models (named `imagen-3.0-*`) are supported. + /// - Note: Refer to [Imagen models](https://firebase.google.com/docs/vertex-ai/models) for + /// guidance on choosing an appropriate model for your use case. /// /// - Parameters: - /// - modelName: The name of the Imagen 3 model to use, for example `"imagen-3.0-generate-002"`; - /// see [model versions](https://firebase.google.com/docs/vertex-ai/models) for a list of - /// supported Imagen 3 models. + /// - modelName: The name of the Imagen 3 model to use. /// - generationConfig: Configuration options for generating images with Imagen. /// - safetySettings: Settings describing what types of potentially harmful content your model /// should allow. @@ -138,18 +137,16 @@ public final class FirebaseAI: Sendable { /// **[Public Preview]** Initializes a ``LiveGenerativeModel`` with the given parameters. /// + /// - Note: Refer to [the Firebase docs on the Live + /// API](https://firebase.google.com/docs/ai-logic/live-api#models-that-support-capability) for + /// guidance on choosing an appropriate model for your use case. + /// /// > Warning: Using the Firebase AI Logic SDKs with the Gemini Live API is in Public /// Preview, which means that the feature is not subject to any SLA or deprecation policy and /// could change in backwards-incompatible ways. /// - /// > Important: Only models that support the Gemini Live API (typically containing `live-*` in - /// the name) are supported. - /// /// - Parameters: - /// - modelName: The name of the model to use, for example - /// `"gemini-live-2.5-flash-preview"`; - /// see [model versions](https://firebase.google.com/docs/ai-logic/live-api?api=dev#models-that-support-capability) - /// for a list of supported models. + /// - modelName: The name of the model to use. /// - generationConfig: The content generation parameters your model should use. /// - tools: A list of ``Tool`` objects that the model may use to generate the next response. /// - toolConfig: Tool configuration for any ``Tool`` specified in the request. 
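The initializer docs above now only name their parameters, so as a usage sketch, obtaining the three model types might look like the following. The `firebaseAI(backend:)`, `generativeModel(modelName:)`, and `imagenModel(modelName:)` entry points are part of the existing `FirebaseAI` surface; the `liveModel(modelName:)` factory name and the concrete model names are assumptions for illustration only, so check the linked model lists for currently supported names.

```swift
import FirebaseAI

// Sketch: obtaining the three model types whose doc comments are updated above.
// The `liveModel(modelName:)` factory name and the model names below are
// illustrative assumptions; consult the linked docs for supported models.
func makeModels() -> (GenerativeModel, ImagenModel, LiveGenerativeModel) {
  let ai = FirebaseAI.firebaseAI(backend: .googleAI())

  // Text and multimodal content generation.
  let generativeModel = ai.generativeModel(modelName: "gemini-2.0-flash")

  // Imagen 3 image generation.
  let imagenModel = ai.imagenModel(modelName: "imagen-3.0-generate-002")

  // Gemini Live API (Public Preview), bidirectional streaming.
  let liveModel = ai.liveModel(modelName: "gemini-live-2.5-flash-preview")

  return (generativeModel, imagenModel, liveModel)
}
```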
diff --git a/FirebaseAI/Sources/GenerativeModel.swift b/FirebaseAI/Sources/GenerativeModel.swift index 428e1fe6f26..e3f905793ad 100644 --- a/FirebaseAI/Sources/GenerativeModel.swift +++ b/FirebaseAI/Sources/GenerativeModel.swift @@ -59,7 +59,7 @@ public final class GenerativeModel: Sendable { /// Initializes a new remote model with the given parameters. /// /// - Parameters: - /// - modelName: The name of the model, for example "gemini-2.0-flash". + /// - modelName: The name of the model. /// - modelResourceName: The model resource name corresponding with `modelName` in the backend. /// The form depends on the backend and will be one of: /// - Vertex AI via Firebase AI SDK: diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveGenerationConfig.swift b/FirebaseAI/Sources/Types/Public/Live/LiveGenerationConfig.swift index 21692f27eed..c7033567a91 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveGenerationConfig.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveGenerationConfig.swift @@ -107,13 +107,14 @@ public struct LiveGenerationConfig: Sendable { /// the model. /// /// Input transcripts are the model's interpretation of audio data sent to it, and they are - /// populated in model responses via ``LiveServerContent``. When this field is set to `nil`, - /// input transcripts are not populated in model responses. + /// populated in model responses via ``LiveServerContent/inputAudioTranscription``. When this + /// field is set to `nil`, input transcripts are not populated in model responses. /// - outputAudioTranscription: Configures (and enables) output transcriptions when streaming to /// the model. /// /// Output transcripts are text representations of the audio the model is sending to the - /// client, and they are populated in model responses via ``LiveServerContent``. When this + /// client, and they are populated in model responses via + /// ``LiveServerContent/outputAudioTranscription``. When this /// field is set to `nil`, output transcripts are not populated in model responses. /// /// > Important: Transcripts are independent to the model turn. This means transcripts may diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveGenerativeModel.swift b/FirebaseAI/Sources/Types/Public/Live/LiveGenerativeModel.swift index a9168789ff3..3a8236cb1d5 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveGenerativeModel.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveGenerativeModel.swift @@ -17,7 +17,7 @@ import Foundation /// A multimodal model (like Gemini) capable of real-time content generation based on /// various input types, supporting bidirectional streaming. /// -/// You can create a new session via ``connect()``. +/// You can create a new session via ``LiveGenerativeModel/connect()``. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *) @available(watchOS, unavailable) public final class LiveGenerativeModel { diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveServerContent.swift b/FirebaseAI/Sources/Types/Public/Live/LiveServerContent.swift index 25e29e4b891..15a8b310cf6 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveServerContent.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveServerContent.swift @@ -45,13 +45,15 @@ public struct LiveServerContent: Sendable { /// The model has finished _generating_ data for the current turn. /// /// For realtime playback, there will be a delay between when the model finishes generating - /// content and the client has finished playing back the generated content. 
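To show how the `inputAudioTranscription` / `outputAudioTranscription` parameters and `connect()` fit together, here is a minimal sketch. Only the parameter names come from the doc comments above; the exact `LiveGenerationConfig` and `AudioTranscriptionConfig` initializer shapes, the `liveModel(modelName:generationConfig:)` factory, and `connect()` being `async throws` are assumptions not visible in this diff.

```swift
import FirebaseAI

// Sketch: enabling input and output transcripts on a live session. Initializer
// shapes and the `liveModel(...)` factory name are assumptions; only the
// parameter names are taken from the doc comments above.
func openTranscribedSession(ai: FirebaseAI) async throws -> LiveSession {
  let config = LiveGenerationConfig(
    speechConfig: SpeechConfig(voiceName: "Aoede"),      // prebuilt voice, illustrative
    inputAudioTranscription: AudioTranscriptionConfig(), // transcribe audio sent to the model
    outputAudioTranscription: AudioTranscriptionConfig() // transcribe the model's audio replies
  )

  let model = ai.liveModel(
    modelName: "gemini-live-2.5-flash-preview", // illustrative model name
    generationConfig: config
  )

  // `connect()` opens the WebSocket-backed session (see LiveGenerativeModel).
  return try await model.connect()
}
```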
`generationComplete` - /// indicates that the model is done generating data, while `isTurnComplete` indicates the model - /// is waiting for additional client messages. Sending a message during this delay may cause a - /// `wasInterrupted` message to be sent. + /// content and the client has finished playing back the generated content. + /// ``LiveServerContent/isGenerationComplete`` indicates that the model is done generating data, + /// while ``LiveServerContent/isTurnComplete`` indicates the model is waiting for additional + /// client messages. Sending a message during this delay may cause a + /// ``LiveServerContent/wasInterrupted`` message to be sent. /// - /// Note that if the model `wasInterrupted`, this will not be set. The model will go from - /// `wasInterrupted` -> `turnComplete`. + /// > Important: If the model ``LiveServerContent/wasInterrupted``, this will not be set. The + /// > model will go from ``LiveServerContent/wasInterrupted`` -> + /// > ``LiveServerContent/isTurnComplete``. public var isGenerationComplete: Bool { serverContent.generationComplete ?? false } /// Metadata specifying the sources used to ground generated content. @@ -60,7 +62,7 @@ public struct LiveServerContent: Sendable { /// The model's interpretation of what the client said in an audio message. /// /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to - /// ``LiveGenerationConfig``. + /// the `inputAudioTranscription` field in ``LiveGenerationConfig``. public var inputAudioTranscription: LiveAudioTranscription? { serverContent.inputTranscription.map { LiveAudioTranscription($0) } } @@ -68,7 +70,7 @@ public struct LiveServerContent: Sendable { /// Transcription matching the model's audio response. /// /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to - /// ``LiveGenerationConfig``. + /// the `outputAudioTranscription` field in ``LiveGenerationConfig``. /// /// > Important: Transcripts are independent to the model turn. This means transcripts may /// > come earlier or later than when the model sends the corresponding audio responses. diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCall.swift b/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCall.swift index 7209e312c76..6c55ee5ff4c 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCall.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCall.swift @@ -14,8 +14,8 @@ /// Request for the client to execute the provided ``functionCalls``. /// -/// The client should return matching ``FunctionResponsePart``, where the `functionId` fields -/// correspond to individual ``FunctionCallPart``s. +/// The client should return matching ``FunctionResponsePart``, where the +/// ``FunctionResponsePart/functionId`` fields correspond to individual ``FunctionCallPart``s. 
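Since the turn lifecycle described above is easy to misread, here is a sketch of how a client might interpret those flags and transcripts on a received `LiveServerContent`. The ordering follows the doc comments (an interrupted turn skips generation-complete and goes straight to turn-complete); treating `wasInterrupted` and `isTurnComplete` as `Bool` and printing a transcription value directly are assumptions.

```swift
import FirebaseAI

// Sketch: reacting to the turn-lifecycle flags and transcripts documented
// above. Treating `wasInterrupted`/`isTurnComplete` as Bool, and how the
// content was obtained from the session, are assumptions.
func handle(_ content: LiveServerContent) {
  if content.wasInterrupted {
    // The client interrupted the model mid-turn; generation-complete is skipped
    // and the next lifecycle signal will be turn-complete.
    print("Model was interrupted")
  } else if content.isGenerationComplete {
    // The model finished generating, but locally buffered audio may still be
    // playing back on the client.
    print("Generation complete; playback may still be draining")
  }

  if content.isTurnComplete {
    print("Model is waiting for further client messages")
  }

  // Only populated when the matching AudioTranscriptionConfig was provided in
  // LiveGenerationConfig; transcripts may lead or trail the audio itself.
  if let input = content.inputAudioTranscription {
    print("Client audio transcript: \(input)")
  }
  if let output = content.outputAudioTranscription {
    print("Model audio transcript: \(output)")
  }
}
```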
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *) @available(watchOS, unavailable) public struct LiveServerToolCall: Sendable { diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCallCancellation.swift b/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCallCancellation.swift index ca7973c64b7..1572c30c5bc 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCallCancellation.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveServerToolCallCancellation.swift @@ -20,8 +20,8 @@ @available(watchOS, unavailable) public struct LiveServerToolCallCancellation: Sendable { let serverToolCallCancellation: BidiGenerateContentToolCallCancellation - /// A list of `functionId`s matching the `functionId` provided in a previous - /// ``LiveServerToolCall``, where only the provided `functionId`s should be cancelled. + /// A list of function ids matching the ``FunctionCallPart/functionId`` provided in a previous + /// ``LiveServerToolCall``, where only the provided ids should be cancelled. public var ids: [String]? { serverToolCallCancellation.ids } init(_ serverToolCallCancellation: BidiGenerateContentToolCallCancellation) { diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveSession.swift b/FirebaseAI/Sources/Types/Public/Live/LiveSession.swift index 3e5e6923a59..0799e35dc03 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveSession.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveSession.swift @@ -16,9 +16,10 @@ import Foundation /// A live WebSocket session, capable of streaming content to and from the model. /// -/// Messages are streamed through ``responses``, and can be sent through either the dedicated -/// realtime API function (such as ``sendAudioRealtime(audio:)`` or ``sendTextRealtime(text:)``), or -/// through the incremental API (such as ``sendContent(_:turnComplete:)``). +/// Messages are streamed through ``LiveSession/responses``, and can be sent through either the +/// dedicated realtime API function (such as ``LiveSession/sendAudioRealtime(_:)`` and +/// ``LiveSession/sendTextRealtime(_:)``), or through the incremental API (such as +/// ``LiveSession/sendContent(_:turnComplete:)-6x3ae``). /// /// To create an instance of this class, see ``LiveGenerativeModel``. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *) @@ -26,7 +27,7 @@ import Foundation public final class LiveSession: Sendable { private let service: LiveSessionService - /// An asyncronous stream of messages from the server. + /// An asynchronous stream of messages from the server. /// /// These messages from the incremental updates from the model, for the current conversation. public var responses: AsyncThrowingStream { service.responses } @@ -41,7 +42,7 @@ public final class LiveSession: Sendable { /// /// - Parameters: /// - responses: Client generated function results, matched to their respective - /// ``FunctionCallPart`` by the `functionId` field. + /// ``FunctionCallPart`` by the ``FunctionCallPart/functionId`` field. 
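As a usage sketch for the send/receive surface described above: text can be pushed through the realtime API while incremental model updates are consumed from `responses`. The element type of the stream, and whether the realtime send methods throw, are assumptions not visible in this diff.

```swift
import FirebaseAI

// Sketch: driving a LiveSession with the realtime API and consuming the
// `responses` stream. The stream's element shape and the non-throwing `await`
// on `sendTextRealtime(_:)` are assumptions.
func chat(with session: LiveSession) async throws {
  // Realtime input is interpreted as it arrives and may interrupt the model.
  await session.sendTextRealtime("Summarize today's agenda.")

  // Incremental updates from the model for the current conversation; inspect
  // each message for server content, tool calls, or tool-call cancellations.
  for try await message in session.responses {
    print(message)
  }
}
```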
public func sendFunctionResponses(_ responses: [FunctionResponsePart]) async { let message = BidiGenerateContentToolResponse( functionResponses: responses.map { $0.functionResponse } diff --git a/FirebaseAI/Sources/Types/Public/Live/LiveSessionErrors.swift b/FirebaseAI/Sources/Types/Public/Live/LiveSessionErrors.swift index 90b7ab84476..59a1e920e84 100644 --- a/FirebaseAI/Sources/Types/Public/Live/LiveSessionErrors.swift +++ b/FirebaseAI/Sources/Types/Public/Live/LiveSessionErrors.swift @@ -20,7 +20,8 @@ import Foundation /// version, or that the model is just /// not supported. /// -/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this. +/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionUnsupportedMessageError/errorUserInfo`` +/// for the error that caused this. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *) @available(watchOS, unavailable) public struct LiveSessionUnsupportedMessageError: Error, Sendable, CustomNSError { @@ -40,7 +41,8 @@ public struct LiveSessionUnsupportedMessageError: Error, Sendable, CustomNSError /// The live session was closed, because the network connection was lost. /// -/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this. +/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionLostConnectionError/errorUserInfo`` for +/// the error that caused this. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *) @available(watchOS, unavailable) public struct LiveSessionLostConnectionError: Error, Sendable, CustomNSError { @@ -60,7 +62,8 @@ public struct LiveSessionLostConnectionError: Error, Sendable, CustomNSError { /// The live session was closed, but not for a reason the SDK expected. /// -/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this. +/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionUnexpectedClosureError/errorUserInfo`` +/// for the error that caused this. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *) @available(watchOS, unavailable) public struct LiveSessionUnexpectedClosureError: Error, Sendable, CustomNSError { @@ -83,7 +86,8 @@ public struct LiveSessionUnexpectedClosureError: Error, Sendable, CustomNSError /// This can occur due to the model not supporting the requested response modalities, the project /// not having access to the model, the model being invalid, or some internal error. /// -/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this. +/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionSetupError/errorUserInfo`` for the error +/// that caused this. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *) @available(watchOS, unavailable) public struct LiveSessionSetupError: Error, Sendable, CustomNSError { diff --git a/FirebaseAI/Sources/Types/Public/Live/SpeechConfig.swift b/FirebaseAI/Sources/Types/Public/Live/SpeechConfig.swift index 67f4799f6e4..a8e291d62f3 100644 --- a/FirebaseAI/Sources/Types/Public/Live/SpeechConfig.swift +++ b/FirebaseAI/Sources/Types/Public/Live/SpeechConfig.swift @@ -24,7 +24,7 @@ public struct SpeechConfig: Sendable { self.speechConfig = speechConfig } - /// Creates a new `LiveSpeechConfig` value. + /// Creates a new ``SpeechConfig`` value. /// /// - Parameters: /// - voiceName: The name of the prebuilt voice to be used for the model's speech response. 
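Because all four session errors above are `CustomNSError`s that store their cause under `NSUnderlyingErrorKey`, a caller can unwrap them uniformly; a sketch follows. That these errors reach the caller by being thrown (for example out of the `responses` stream) is an assumption.

```swift
import Foundation
import FirebaseAI

// Sketch: extracting the root cause of a live-session failure. Each of these
// error types is a CustomNSError, so the cause lives in errorUserInfo under
// NSUnderlyingErrorKey, as the doc comments above describe.
func logSessionFailure(_ error: Error) {
  switch error {
  case let setup as LiveSessionSetupError:
    print("Session setup failed:", setup.errorUserInfo[NSUnderlyingErrorKey] ?? "unknown cause")
  case let lost as LiveSessionLostConnectionError:
    print("Connection lost:", lost.errorUserInfo[NSUnderlyingErrorKey] ?? "unknown cause")
  case let closed as LiveSessionUnexpectedClosureError:
    print("Closed unexpectedly:", closed.errorUserInfo[NSUnderlyingErrorKey] ?? "unknown cause")
  case let unsupported as LiveSessionUnsupportedMessageError:
    print("Unsupported message:", unsupported.errorUserInfo[NSUnderlyingErrorKey] ?? "unknown cause")
  default:
    print("Unhandled error:", error)
  }
}
```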
diff --git a/FirebaseAI/Sources/Types/Public/Part.swift b/FirebaseAI/Sources/Types/Public/Part.swift index 8acf7b12e9a..379ba6e6a59 100644 --- a/FirebaseAI/Sources/Types/Public/Part.swift +++ b/FirebaseAI/Sources/Types/Public/Part.swift @@ -173,7 +173,7 @@ public struct FunctionCallPart: Part { /// - name: The name of the function to call. /// - args: The function parameters and values. /// - id: Unique id of the function call. If present, the returned ``FunctionResponsePart`` - /// should have a matching `id` field. + /// should have a matching ``FunctionResponsePart/functionId`` field. public init(name: String, args: JSONObject, id: String? = nil) { self.init(FunctionCall(name: name, args: args, id: id), isThought: nil, thoughtSignature: nil) } @@ -196,7 +196,7 @@ public struct FunctionResponsePart: Part { let _isThought: Bool? let thoughtSignature: String? - /// Matching `id` for a ``FunctionCallPart``, if one was provided. + /// Matching ``FunctionCallPart/functionId`` for a ``FunctionCallPart``, if one was provided. public var functionId: String? { functionResponse.id } /// The name of the function that was called. @@ -223,7 +223,8 @@ public struct FunctionResponsePart: Part { /// - Parameters: /// - name: The name of the function that was called. /// - response: The function's response. - /// - functionId: Matching `functionId` for a ``FunctionCallPart``, if one was provided. + /// - functionId: Matching ``FunctionCallPart/functionId`` for a ``FunctionCallPart``, if one + /// was provided. public init(name: String, response: JSONObject, functionId: String? = nil) { self.init( FunctionResponse(name: name, response: response, id: functionId),
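To make the id round-trip above concrete: when a `FunctionCallPart` carries an id, the `FunctionResponsePart` built for it should hand that id back through `functionId`. The `FunctionResponsePart` initializer is the one shown in this diff; reading `name` and `functionId` off the `FunctionCallPart`, and the `JSONValue` case used for the payload, are assumptions, and the weather result is purely illustrative.

```swift
import FirebaseAI

// Sketch: pairing a function response with its originating call. The
// FunctionResponsePart initializer is taken from the diff above; the `name`
// and `functionId` accessors on FunctionCallPart and the `.string` JSONValue
// case are assumptions. The payload is illustrative.
func respond(to call: FunctionCallPart) -> FunctionResponsePart {
  // Run the requested function (stubbed here) and package the result as JSON.
  let result: JSONObject = ["forecast": .string("Sunny, 21°C")]

  return FunctionResponsePart(
    name: call.name,             // echo the called function's name
    response: result,
    functionId: call.functionId  // echo the call's id so the model can match them
  )
}
```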