Closed

Changes from all commits (39 commits):
fb9d0f8  spelling: ; otherwise,  (jsoref, Nov 4, 2025)
1fa2bdc  spelling: a  (jsoref, Nov 4, 2025)
e01ca65  spelling: aggregate  (jsoref, Nov 4, 2025)
101193f  spelling: an  (jsoref, Nov 4, 2025)
8aba32c  spelling: attributes  (jsoref, Nov 4, 2025)
4f56128  spelling: cannot  (jsoref, Nov 4, 2025)
d0b5c08  spelling: column  (jsoref, Nov 4, 2025)
cd74ca1  spelling: compaction  (jsoref, Nov 4, 2025)
2de394e  spelling: contain  (jsoref, Nov 4, 2025)
4afe35e  spelling: coordinates  (jsoref, Nov 4, 2025)
2ec9cef  spelling: criterion  (jsoref, Nov 4, 2025)
027944d  spelling: dependency  (jsoref, Nov 4, 2025)
7055a02  spelling: encapsulated  (jsoref, Nov 4, 2025)
445164e  spelling: encountering  (jsoref, Nov 4, 2025)
8994d34  spelling: evaluation  (jsoref, Nov 4, 2025)
70204be  spelling: events  (jsoref, Nov 4, 2025)
73175e4  spelling: exception  (jsoref, Nov 4, 2025)
f8755fe  spelling: execution  (jsoref, Nov 4, 2025)
cca4c96  spelling: fields  (jsoref, Nov 4, 2025)
8fbb560  spelling: format  (jsoref, Nov 4, 2025)
8e93447  spelling: function  (jsoref, Nov 4, 2025)
423e9fe  spelling: generate  (jsoref, Nov 4, 2025)
f39aa6a  spelling: happened  (jsoref, Nov 4, 2025)
6aec333  spelling: id  (jsoref, Nov 4, 2025)
efeb257  spelling: invocation  (jsoref, Nov 4, 2025)
0e89f8c  spelling: nonexistent  (jsoref, Nov 4, 2025)
e2dcb9f  spelling: occurrences  (jsoref, Nov 4, 2025)
5aea81e  spelling: optimistic  (jsoref, Nov 4, 2025)
cae463a  spelling: overridden  (jsoref, Nov 4, 2025)
146cd66  spelling: punctuation  (jsoref, Nov 4, 2025)
e105db6  spelling: reporting  (jsoref, Nov 4, 2025)
c7762b3  spelling: response  (jsoref, Nov 4, 2025)
14aa5af  spelling: retrieve  (jsoref, Nov 4, 2025)
1f13317  spelling: segregation  (jsoref, Nov 4, 2025)
8658b0a  spelling: set up  (jsoref, Nov 4, 2025)
37bdf56  spelling: summary  (jsoref, Nov 4, 2025)
1c17d8b  spelling: the  (jsoref, Nov 4, 2025)
70809a6  spelling: whether  (jsoref, Nov 4, 2025)
3df5932  Merge branch 'main' into spelling  (hangfei, Nov 6, 2025)
2 changes: 1 addition & 1 deletion src/google/adk/agents/base_agent.py
@@ -171,7 +171,7 @@ def _load_agent_state(
       state_type: The type of the agent state.

     Returns:
-      The current state if resuming, otherwise None.
+      The current state if resuming; otherwise, None.
     """
     if not ctx.is_resumable:
       return None
4 changes: 2 additions & 2 deletions src/google/adk/agents/config_schemas/AgentConfig.json
@@ -1214,7 +1214,7 @@
           }
         ],
         "default": null,
-        "description": "The unique id of the function call. If populated, the client to execute the\n `function_call` and return the response with the matching `id`.",
+        "description": "The unique ID of the function call. If populated, the client to execute the\n `function_call` and return the response with the matching `id`.",
         "title": "Id"
       },
       "args": {
@@ -1426,7 +1426,7 @@
           }
         ],
         "default": null,
-        "description": "Optional. The id of the function call this response is for. Populated by the client to match the corresponding function call `id`.",
+        "description": "Optional. The ID of the function call this response is for. Populated by the client to match the corresponding function call `id`.",
         "title": "Id"
       },
       "name": {
2 changes: 1 addition & 1 deletion src/google/adk/agents/llm_agent.py
@@ -442,7 +442,7 @@ async def _run_async_impl(
   ) -> AsyncGenerator[Event, None]:
     agent_state = self._load_agent_state(ctx, BaseAgentState)

-    # If there is an sub-agent to resume, run it and then end the current
+    # If there is a sub-agent to resume, run it and then end the current
     # agent.
     if agent_state is not None and (
         agent_to_transfer := self._get_subagent_to_resume(ctx)
4 changes: 2 additions & 2 deletions src/google/adk/agents/remote_a2a_agent.py
@@ -437,7 +437,7 @@ async def _handle_a2a_response(
       event = convert_a2a_task_to_event(task, self.name, ctx)
     else:
       # This is a streaming update without a message (e.g. status change)
-      # or an partial artifact update. We don't emit an event for these
+      # or a partial artifact update. We don't emit an event for these
       # for now.
       return None

@@ -532,7 +532,7 @@ async def _run_async_impl(
         event.custom_metadata[A2A_METADATA_PREFIX + "request"] = (
             a2a_request.model_dump(exclude_none=True, by_alias=True)
         )
-        # If the response is a ClientEvent, record the task state, otherwise
+        # If the response is a ClientEvent, record the task state; otherwise,
         # record the message object.
         if isinstance(a2a_response, tuple):
           event.custom_metadata[A2A_METADATA_PREFIX + "response"] = (
2 changes: 1 addition & 1 deletion src/google/adk/agents/run_config.py
@@ -38,7 +38,7 @@ class StreamingMode(Enum):
 class RunConfig(BaseModel):
   """Configs for runtime behavior of agents.

-  The configs here will be overriden by agent-specific configurations.
+  The configs here will be overridden by agent-specific configurations.
   """

   model_config = ConfigDict(
4 changes: 2 additions & 2 deletions src/google/adk/apps/base_events_summarizer.py
@@ -35,13 +35,13 @@ async def maybe_summarize_events(
     If compaction failed, return None. Otherwise, compact into a content and
     return it.

-    This method will summarize the events and return a new summray event
+    This method will summarize the events and return a new summary event
     indicating the range of events it summarized.

     Args:
       events: Events to compact.

     Returns:
-      The new compacted event, or None if no compaction happended.
+      The new compacted event, or None if no compaction happened.
     """
     raise NotImplementedError()
6 changes: 3 additions & 3 deletions src/google/adk/cli/cli_tools_click.py
@@ -501,7 +501,7 @@ def cli_eval(
   *Eval Set File Path*
   For each file, all evals will be run by default.

-  If you want to run only specific evals from a eval set, first create a comma
+  If you want to run only specific evals from an eval set, first create a comma
   separated list of eval names and then add that as a suffix to the eval set
   file name, demarcated by a `:`.

@@ -521,7 +521,7 @@ def cli_eval(
   *Eval Set ID*
   For each eval set, all evals will be run by default.

-  If you want to run only specific evals from a eval set, first create a comma
+  If you want to run only specific evals from an eval set, first create a comma
   separated list of eval names and then add that as a suffix to the eval set
   file name, demarcated by a `:`.

@@ -1598,7 +1598,7 @@ def cli_deploy_cloud_run(
     default="",
     help=(
         "Optional. The filepath to the `.agent_engine_config.json` file to use."
-        " The values in this file will be overriden by the values set by other"
+        " The values in this file will be overridden by the values set by other"
         " flags. (default: the `.agent_engine_config.json` file in the `agent`"
         " directory, if any.)"
     ),
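For illustration, with the suffix syntax described in the docstring above, running only two evals from a set would look like `adk eval path/to/my_agent my_evals.evalset.json:eval_1,eval_2` (the agent path, file name, and eval names here are hypothetical).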
4 changes: 2 additions & 2 deletions src/google/adk/cli/service_registry.py
@@ -51,7 +51,7 @@ def _parse_agent_engine_kwargs(
   """Helper to parse agent engine resource name."""
   if not uri_part:
     raise ValueError(
-        "Agent engine resource name or resource id can not be empty."
+        "Agent engine resource name or resource id cannot be empty."
     )
   if "/" in uri_part:
     parts = uri_part.split("/")

@@ -192,7 +192,7 @@ def rag_memory_factory(uri: str, **kwargs):

   rag_corpus = urlparse(uri).netloc
   if not rag_corpus:
-    raise ValueError("Rag corpus can not be empty.")
+    raise ValueError("Rag corpus cannot be empty.")
   agents_dir = kwargs.get("agents_dir")
   project, location = _load_gcp_config(agents_dir, "RAG memory service")
   return VertexAiRagMemoryService(
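As context for the parsing helper above, a minimal sketch of the dual input format it guards, either a full resource name or a bare ID (the helper and values below are hypothetical, not the ADK implementation):

def engine_id(uri_part: str) -> str:
  # Accept either
  # "projects/<project>/locations/<location>/reasoningEngines/<id>"
  # or a bare "<id>"; reject empty input, as the real code does.
  if not uri_part:
    raise ValueError("Agent engine resource name or resource id cannot be empty.")
  if "/" in uri_part:
    return uri_part.split("/")[-1]  # trailing path segment is the id
  return uri_part

assert engine_id("projects/p/locations/us-central1/reasoningEngines/42") == "42"
assert engine_id("42") == "42"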
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/agent_evaluator.py
@@ -123,7 +123,7 @@ async def evaluate_eval_set(
       eval_set: The eval set.
       criteria: Evaluation criteria, a dictionary of metric names to their
         respective thresholds. This field is deprecated.
-      eval_config: The evauation config.
+      eval_config: The evaluation config.
       num_runs: Number of times all entries in the eval dataset should be
         assessed.
       agent_name: The name of the agent, if trying to evaluate something other
4 changes: 2 additions & 2 deletions src/google/adk/evaluation/eval_metrics.py
@@ -90,7 +90,7 @@ class JudgeModelOptions(EvalBaseModel):


 class BaseCriterion(BaseModel):
-  """Base creterion to use for an Eval Metric."""
+  """Base criterion to use for an Eval Metric."""

   model_config = ConfigDict(
       alias_generator=alias_generators.to_camel,

@@ -126,7 +126,7 @@ class RubricsBasedCriterion(BaseCriterion):
         "Rubrics to be used by Metric. Not all metrics rely on rubrics, but"
         " metrics like `rubric_based_final_response_quality_v1` do. Metrics"
         " that don't use Rubrics, will just ignore this field, if specified."
-        " Metrics that do use rubrics will raise an execption, if they are"
+        " Metrics that do use rubrics will raise an exception, if they are"
         " not specified."
     ),
 )
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/evaluator.py
@@ -71,6 +71,6 @@ def evaluate_invocations(
       expected_invocations: An optional list of invocations, if specified,
         usually act as a benchmark/golden response. If these are specified
         usually the expectation is that the length of this list and actual
-        invocaiton is the same.
+        invocation is the same.
     """
     raise NotImplementedError()
6 changes: 3 additions & 3 deletions src/google/adk/evaluation/hallucinations_v1.py
@@ -58,10 +58,10 @@

 **Instructions:**
 1. Overall, you should decompose the whole provided response into individual sentences. You should make sure the output covers ALL the sentences in the provided response block.
-2. You should COPY each sentence as it is, WORD BY WORD. DO NOT modify the sentence or the surrounding punctuations.
+2. You should COPY each sentence as it is, WORD BY WORD. DO NOT modify the sentence or the surrounding punctuation.
 3. If there are bullet points in the response, you should segment each bullet point into DIFFERENT sentences. If one bullet point has sub bullet points, you should further decompose sub bullet points into DIFFERENT sentences.
    For example, if there are responses like "it has three criteria: * aaa. * bbb. * ccc", you should segment them into FOUR sentences: "it has three criteria", "aaa", "bbb", "ccc". Bullet points could start with numbers (1/2/3/etc) or symbols like "*", "-" etc.
-4. When encoutering tables, you should include the whole table in ONE sentence output.
+4. When encountering tables, you should include the whole table in ONE sentence output.
 5. Each sentence should be meaningful to further analyze on. DO NOT ONLY put symbols themselves into a sentence.
 6. You should ONLY output segmented sentences in the provided response. DO NOT make up any new sentences.

@@ -716,7 +716,7 @@ async def evaluate_invocations(
       expected_invocations: Optional[list[Invocation]],
   ) -> EvaluationResult:
     # expected_invocations are not required by the metric and if they are not
-    # supplied, we provide an a list of None to rest of the code.
+    # supplied, we provide a list of None to rest of the code.
     expected_invocations = (
         [None] * len(actual_invocations)
         if expected_invocations is None
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/llm_as_judge.py
@@ -122,7 +122,7 @@ async def evaluate_invocations(
       raise ValueError("expected_invocations is needed by this metric.")

     # If expected_invocation are not required by the metric and if they are not
-    # supplied, we provide an a list of None.
+    # supplied, we provide a list of None.
     expected_invocations = (
         [None] * len(actual_invocations)
         if expected_invocations is None
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/request_intercepter_plugin.py
@@ -34,7 +34,7 @@ class _RequestIntercepterPlugin(BasePlugin):
   """A plugin that intercepts requests that are made to the model and couples them with the model response.

   NOTE: This implementation is intended for eval systems internal usage. Do not
-  take direct depdency on it.
+  take direct dependency on it.

   Context behind the creation of this intercepter:
     Some of the newer AutoRater backed metrics need access the pieces of
6 changes: 3 additions & 3 deletions src/google/adk/evaluation/rubric_based_evaluator.py
@@ -122,7 +122,7 @@ def aggregate(
     """Returns a combined result for the invocation using majority vote.

     This method takes all those samples for a single invocation and combines
-    them to genreate one single result for the invocation.
+    them to generate one single result for the invocation.

     This method specifically uses majority vote to aggregate scores for a
     rubric. Take following Invocation and Rubric for example:

@@ -132,7 +132,7 @@
       Weather Agent: No, it will be moderately warm as predicted temperature
         for Seattle, WA tomorrow is 88F.

-      Rubric: Agent's reponse was concise and to the point.
+      Rubric: Agent's response was concise and to the point.

     We will sample the AutoRater 5 times, and the AutoRater responds
     with (skipping the rationale field for now):

@@ -223,7 +223,7 @@ def summarize(
     assessed for each invocation. But, we do want to summarize and make a
     statement on how the eval case as a whole performed on the metric.

-    This method helps us aggreate rubric scores across invocation.
+    This method helps us aggregate rubric scores across invocation.

     This method calculates the mean score of a rubric across several
     invocations.
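The docstring above describes the majority-vote aggregation only in prose; as a minimal, self-contained sketch of the idea (the helper below is hypothetical, written for illustration, and is not the ADK implementation):

from collections import Counter
from typing import Optional

def majority_vote(sampled_scores: list[Optional[bool]]) -> Optional[bool]:
  # Each sample is True (rubric satisfied), False (not satisfied), or
  # None (the AutoRater abstained). Ties and all-None inputs yield None,
  # mirroring a "no verdict" outcome.
  votes = Counter(s for s in sampled_scores if s is not None)
  if not votes:
    return None
  top = votes.most_common(2)
  if len(top) == 2 and top[0][1] == top[1][1]:
    return None  # tie: no majority
  return top[0][0]

# Five AutoRater samples for the "concise response" rubric above:
assert majority_vote([True, True, False, True, None]) is True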
4 changes: 2 additions & 2 deletions src/google/adk/evaluation/rubric_based_tool_use_quality_v1.py
@@ -135,9 +135,9 @@ class RubricBasedToolUseV1Evaluator(RubricBasedEvaluator):

   Example: Lets take an example of a Weather Agent that has access to two tools:
   1: GeoCoding Tool: Coverts a city name, address or zip code into geographic
-     cordinates.
+     coordinates.
   2: GetWeather Tool: Gets weather for the next 10 days for the given geographic
-     cordinates.
+     coordinates.

   For this agent, one can create following Rubrics that could focus on tool use
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/vertex_ai_eval_facade.py
@@ -74,7 +74,7 @@ def evaluate_invocations(
       raise ValueError("expected_invocations is needed by this metric.")

     # If expected_invocation are not required by the metric and if they are not
-    # supplied, we provide an a list of None.
+    # supplied, we provide a list of None.
     expected_invocations = (
         [None] * len(actual_invocations)
         if expected_invocations is None
2 changes: 1 addition & 1 deletion src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -392,7 +392,7 @@ async def _run_one_step_async(
         and events
         and len(events) > 1
         # TODO: here we are using the last 2 events to decide whether to pause
-        # the invocation. But this is just being optmisitic, we should find a
+        # the invocation. But this is just being optimistic, we should find a
         # way to pause when the long running tool call is followed by more than
         # one text responses.
         and (
6 changes: 3 additions & 3 deletions src/google/adk/flows/llm_flows/contents.py
@@ -213,7 +213,7 @@ def _rearrange_events_for_latest_function_response(
 def _contains_empty_content(event: Event) -> bool:
   """Check if an event should be skipped due to missing or empty content.

-  This can happen to the evnets that only changed session state.
+  This can happen to the events that only changed session state.
   When both content and transcriptions are empty, the event will be considered
   as empty.

@@ -251,7 +251,7 @@ def _process_compaction_events(events: list[Event]) -> list[Event]:
   # compaction_1(event_1, event_2, timestamp=3), event_3(timestamp=4),
   # compaction_2(event_2, event_3, timestamp=5), event_4(timestamp=6)]
   # for each compaction event, it only covers the events at most between the
-  # current compaction and the previous compaction. So during copmaction, we
+  # current compaction and the previous compaction. So during compaction, we
   # don't have to go across compaction boundaries.
   # Compaction events are always strictly in order based on event timestamp.
   events_to_process = []

@@ -589,7 +589,7 @@ def _is_event_belongs_to_branch(
 ) -> bool:
   """Check if an event belongs to the current branch.

-  This is for event context segration between agents. E.g. agent A shouldn't
+  This is for event context segregation between agents. E.g. agent A shouldn't
   see output of agent B.
   """
   if not invocation_branch or not event.branch:
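The body of `_is_event_belongs_to_branch` is cut off by the hunk above; as a hedged sketch of the usual prefix-matching idea behind such branch segregation (an illustration under assumptions, not necessarily ADK's exact rule):

def belongs_to_branch(invocation_branch: str | None, event_branch: str | None) -> bool:
  # Sketch: events without a branch are visible everywhere; otherwise an
  # event is visible when the current branch descends from the event's
  # branch, e.g. "root.agent_a" sees "root" events but not "root.agent_b" output.
  if not invocation_branch or not event_branch:
    return True
  return invocation_branch == event_branch or invocation_branch.startswith(
      event_branch + "."
  )

assert belongs_to_branch("root.agent_a", "root")
assert not belongs_to_branch("root.agent_a", "root.agent_b")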
2 changes: 1 addition & 1 deletion src/google/adk/flows/llm_flows/functions.py
@@ -656,7 +656,7 @@ async def _process_function_live_helper(
         }
       elif hasattr(tool, 'func') and inspect.isasyncgenfunction(tool.func):
         # for streaming tool use case
-        # we require the function to be a async generator function
+        # we require the function to be an async generator function
         async def run_tool_and_update_queue(tool, function_args, tool_context):
           try:
             async with Aclosing(
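For context, the `inspect.isasyncgenfunction` branch above handles streaming tools; a hedged example of the shape such a tool function takes (the tool name and data below are made up):

import asyncio
from typing import AsyncGenerator

async def monitor_stock_price(symbol: str) -> AsyncGenerator[str, None]:
  # A streaming tool is an async generator: it yields intermediate
  # results over time instead of returning a single value.
  for price in (101.2, 101.9, 102.4):
    await asyncio.sleep(1)  # stand-in for polling an external feed
    yield f"{symbol}: {price}"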
2 changes: 1 addition & 1 deletion src/google/adk/flows/llm_flows/request_confirmation.py
@@ -78,7 +78,7 @@ async def run_async(
         and len(function_response.response.values()) == 1
         and 'response' in function_response.response.keys()
     ):
-      # ADK web client will send a request that is always encapted in a
+      # ADK web client will send a request that is always encapsulated in a
       # 'response' key.
       tool_confirmation = ToolConfirmation.model_validate(
           json.loads(function_response.response['response'])
2 changes: 1 addition & 1 deletion src/google/adk/models/lite_llm.py
@@ -481,7 +481,7 @@ def _schema_to_dict(schema: types.Schema) -> dict:
 def _function_declaration_to_tool_param(
     function_declaration: types.FunctionDeclaration,
 ) -> dict:
-  """Converts a types.FunctionDeclaration to a openapi spec dictionary.
+  """Converts a types.FunctionDeclaration to an openapi spec dictionary.

   Args:
     function_declaration: The function declaration to convert.
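To make the conversion above concrete: a function declaration typically maps onto the OpenAI-style tool dictionary that LiteLLM consumes. The sketch below is an assumed shape for illustration, not ADK's exact output:

# Assumed input: types.FunctionDeclaration(name="get_weather",
#     description="Gets weather for a city.", parameters=<Schema>)
tool_param = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Gets weather for a city.",
        "parameters": {  # JSON Schema, e.g. from _schema_to_dict above
            "type": "object",
            "properties": {"city": {"type": "string"}},
        },
    },
}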
4 changes: 2 additions & 2 deletions src/google/adk/models/llm_response.py
@@ -33,7 +33,7 @@ class LlmResponse(BaseModel):
   Attributes:
     content: The content of the response.
     grounding_metadata: The grounding metadata of the response.
-    partial: Indicates whether the text content is part of a unfinished text
+    partial: Indicates whether the text content is part of an unfinished text
       stream. Only used for streaming mode and when the content is plain text.
     turn_complete: Indicates whether the response from the model is complete.
       Only used for streaming mode.

@@ -69,7 +69,7 @@ class LlmResponse(BaseModel):
   """The grounding metadata of the response."""

   partial: Optional[bool] = None
-  """Indicates whether the text content is part of a unfinished text stream.
+  """Indicates whether the text content is part of an unfinished text stream.

   Only used for streaming mode and when the content is plain text.
   """
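To illustrate the `partial` and `turn_complete` semantics documented above, a hedged sketch of a streamed text turn; the import paths are assumed from the file layout in this diff:

from google.adk.models.llm_response import LlmResponse  # path assumed
from google.genai import types

def chunk(text: str, partial: bool) -> LlmResponse:
  return LlmResponse(
      content=types.Content(role="model", parts=[types.Part(text=text)]),
      partial=partial,
  )

stream = [
    chunk("Hello", partial=True),         # unfinished fragment
    chunk(" world", partial=True),        # unfinished fragment
    chunk("Hello world", partial=False),  # aggregated full text
    LlmResponse(turn_complete=True),      # the model's turn is done
]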
2 changes: 1 addition & 1 deletion src/google/adk/plugins/save_files_as_artifacts_plugin.py
@@ -75,7 +75,7 @@ async def on_user_message_callback(
         continue

       try:
-        # Use display_name if available, otherwise generate a filename
+        # Use display_name if available; otherwise, generate a filename
         file_name = part.inline_data.display_name
         if not file_name:
           file_name = f'artifact_{invocation_context.invocation_id}_{i}'
6 changes: 3 additions & 3 deletions src/google/adk/runners.py
@@ -1044,7 +1044,7 @@ async def _setup_context_for_new_invocation(
     """Sets up the context for a new invocation.

     Args:
-      session: The session to setup the invocation context for.
+      session: The session to set up the invocation context for.
       new_message: The new message to process and append to the session.
       run_config: The run config of the agent.
       state_delta: Optional state changes to apply to the session.

@@ -1083,7 +1083,7 @@ async def _setup_context_for_resumed_invocation(
     """Sets up the context for a resumed invocation.

     Args:
-      session: The session to setup the invocation context for.
+      session: The session to set up the invocation context for.
       new_message: The new message to process and append to the session.
       invocation_id: The invocation id to resume.
       run_config: The run config of the agent.

@@ -1099,7 +1099,7 @@ async def _setup_context_for_resumed_invocation(
     if not session.events:
       raise ValueError(f'Session {session.id} has no events to resume.')

-    # Step 1: Maybe retrive a previous user message for the invocation.
+    # Step 1: Maybe retrieve a previous user message for the invocation.
     user_message = new_message or self._find_user_message_for_invocation(
         session.events, invocation_id
     )
2 changes: 1 addition & 1 deletion src/google/adk/sessions/database_session_service.py
@@ -247,7 +247,7 @@ class StorageEvent(Base):
       PreciseTimestamp, default=func.now()
   )

-  # === Fileds from llm_response.py ===
+  # === Fields from llm_response.py ===
   content: Mapped[dict[str, Any]] = mapped_column(DynamicJSON, nullable=True)
   grounding_metadata: Mapped[dict[str, Any]] = mapped_column(
       DynamicJSON, nullable=True
2 changes: 1 addition & 1 deletion src/google/adk/telemetry/google_cloud.py
@@ -53,7 +53,7 @@ def get_gcp_exporters(

   Args:
     enable_tracing: whether to enable tracing to Cloud Trace.
-    enable_metrics: whether to enable raporting metrics to Cloud Monitoring.
+    enable_metrics: whether to enable reporting metrics to Cloud Monitoring.
     enable_logging: whether to enable sending logs to Cloud Logging.
     google_auth: optional custom credentials and project_id. google.auth.default() used when this is omitted.
   """
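A hedged usage sketch of the function documented above, based only on the argument list visible in this hunk:

exporters = get_gcp_exporters(
    enable_tracing=True,   # export spans to Cloud Trace
    enable_metrics=True,   # report metrics to Cloud Monitoring
    enable_logging=False,  # skip Cloud Logging
)  # google_auth omitted, so google.auth.default() is used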
2 changes: 1 addition & 1 deletion src/google/adk/telemetry/tracing.py
@@ -91,7 +91,7 @@ def trace_agent_invocation(
   Args:
     span: Span on which attributes are set.
     agent: Agent from which attributes are gathered.
-    ctx: InvocationContext from which attrbiutes are gathered.
+    ctx: InvocationContext from which attributes are gathered.

   Inference related fields are not set, due to their planned removal from invoke_agent span:
   https://github.com/open-telemetry/semantic-conventions/issues/2632
2 changes: 1 addition & 1 deletion src/google/adk/tools/_google_credentials.py
@@ -153,7 +153,7 @@ async def get_valid_credentials(
         else None
     )

-    # If credentails are empty use the default credential
+    # If credentials are empty use the default credential
     if not creds:
       creds = self.credentials_config.credentials