diff --git a/.gemini/settings.json b/.gemini/settings.json
new file mode 100644
index 000000000..ebf257e01
--- /dev/null
+++ b/.gemini/settings.json
@@ -0,0 +1,3 @@
+{
+ "contextFileName": "AGENTS.md"
+}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 7c2ffdd95..f04f3f039 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -31,5 +31,8 @@ If applicable, add screenshots to help explain your problem.
- Python version(python -V):
- ADK version(pip show google-adk):
+ **Model Information:**
+ For example, which model is being used.
+
**Additional context**
Add any other context about the problem here.
diff --git a/.github/workflows/pr-commit-check.yml b/.github/workflows/pr-commit-check.yml
new file mode 100644
index 000000000..ec6644311
--- /dev/null
+++ b/.github/workflows/pr-commit-check.yml
@@ -0,0 +1,62 @@
+# .github/workflows/pr-commit-check.yml
+# This GitHub Action workflow checks if a pull request has more than one commit.
+# If it does, it fails the check and instructs the user to squash their commits.
+
+name: 'PR Commit Check'
+
+# This workflow runs on pull request events.
+# It's configured to run on any pull request that is opened or synchronized (new commits pushed).
+on:
+ pull_request:
+ types: [opened, synchronize]
+
+# Defines the jobs that will run as part of the workflow.
+jobs:
+ check-commit-count:
+ # The type of runner that the job will run on. 'ubuntu-latest' is a good default.
+ runs-on: ubuntu-latest
+
+ # The steps that will be executed as part of the job.
+ steps:
+ # Step 1: Check out the code
+ # This action checks out your repository under $GITHUB_WORKSPACE, so your workflow can access it.
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # We need to fetch all commits to accurately count them.
+ # '0' means fetch all history for all branches and tags.
+ fetch-depth: 0
+
+ # Step 2: Count the commits in the pull request
+ # This step runs a script to get the number of commits in the PR.
+ - name: Count Commits
+ id: count_commits
+ # We use `git rev-list --count` to count the commits.
+ # ${{ github.event.pull_request.base.sha }} is the commit SHA of the base branch.
+ # ${{ github.event.pull_request.head.sha }} is the commit SHA of the head branch (the PR branch).
+ # The '..' syntax gives us the list of commits in the head branch that are not in the base branch.
+ # The output of the command (the count) is stored in a step output variable named 'count'.
+ run: |
+ count=$(git rev-list --count ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }})
+ echo "commit_count=$count" >> $GITHUB_OUTPUT
+
+ # Step 3: Check if the commit count is greater than 1
+ # This step uses the output from the previous step to decide whether to pass or fail.
+ - name: Check Commit Count
+ # This step only runs if the 'commit_count' output from the 'count_commits' step is greater than 1.
+ if: steps.count_commits.outputs.commit_count > 1
+ # If the condition is met, the workflow will exit with a failure status.
+ run: |
+ echo "This pull request has ${{ steps.count_commits.outputs.commit_count }} commits."
+ echo "Please squash them into a single commit before merging."
+ echo "You can use git rebase -i HEAD~N"
+ echo "...where N is the number of commits you want to squash together. The PR check conveniently tells you this number! For example, if the check says you have 3 commits, you would run: git rebase -i HEAD~3."
+ echo "Because you have rewritten the commit history, you must use the --force flag to update the pull request: git push --force"
+ exit 1
+
+ # Step 4: Success message
+ # This step runs if the commit count is not greater than 1 (i.e., it's 1).
+ - name: Success
+ if: steps.count_commits.outputs.commit_count <= 1
+ run: |
+ echo "This pull request has a single commit. Great job!"
diff --git a/.github/workflows/python-unit-tests.yml b/.github/workflows/python-unit-tests.yml
index d4af7b13a..52e61b8a3 100644
--- a/.github/workflows/python-unit-tests.yml
+++ b/.github/workflows/python-unit-tests.yml
@@ -36,8 +36,8 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Install uv
- run: curl -LsSf https://astral.sh/uv/install.sh | sh
+ - name: Install the latest version of uv
+ uses: astral-sh/setup-uv@v6
- name: Install dependencies
run: |
@@ -51,6 +51,7 @@ jobs:
if [[ "${{ matrix.python-version }}" == "3.9" ]]; then
pytest tests/unittests \
--ignore=tests/unittests/a2a \
+ --ignore=tests/unittests/tools/mcp_tool \
--ignore=tests/unittests/artifacts/test_artifact_service.py \
--ignore=tests/unittests/tools/google_api_tool/test_googleapi_to_openapi_converter.py
else
diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml
index 2e258857f..937a3d7d2 100644
--- a/.github/workflows/triage.yml
+++ b/.github/workflows/triage.yml
@@ -40,4 +40,5 @@ jobs:
ISSUE_TITLE: ${{ github.event.issue.title }}
ISSUE_BODY: ${{ github.event.issue.body }}
ISSUE_COUNT_TO_PROCESS: '3' # Process 3 issues at a time on schedule
- run: python contributing/samples/adk_triaging_agent/main.py
+ PYTHONPATH: contributing/samples
+ run: python -m adk_triaging_agent.main
diff --git a/.gitignore b/.gitignore
index 6fb068d48..6f398cbf9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -82,6 +82,7 @@ log/
.env.development.local
.env.test.local
.env.production.local
+uv.lock
# Google Cloud specific
.gcloudignore
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 000000000..70ce7365f
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,114 @@
+# Gemini CLI / Gemini Code Assist Context
+
+This document provides context for the Gemini CLI and Gemini Code Assist to understand the project and assist with development.
+
+## Project Overview
+
+The Agent Development Kit (ADK) is an open-source, code-first Python toolkit for building, evaluating, and deploying sophisticated AI agents with flexibility and control. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and is built for compatibility with other frameworks. ADK was designed to make agent development feel more like software development, to make it easier for developers to create, deploy, and orchestrate agentic architectures that range from simple tasks to complex workflows.
+
+## ADK: Style Guides
+
+### Python Style Guide
+
+The project follows the Google Python Style Guide. Key conventions are enforced using `pylint` with the provided `pylintrc` configuration file. Here are some of the key style points:
+
+* **Indentation**: 2 spaces.
+* **Line Length**: Maximum 80 characters.
+* **Naming Conventions**:
+ * `function_and_variable_names`: `snake_case`
+ * `ClassNames`: `CamelCase`
+ * `CONSTANTS`: `UPPERCASE_SNAKE_CASE`
+* **Docstrings**: Required for all public modules, functions, classes, and methods.
+* **Imports**: Organized and sorted.
+* **Error Handling**: Specific exceptions should be caught, not general ones like `Exception`.
+
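+A minimal illustrative sketch (hypothetical names, not from the ADK codebase) that combines these conventions:
+
+```python
+MAX_RETRIES = 3  # CONSTANTS: UPPERCASE_SNAKE_CASE
+
+
+class RequestRouter:  # ClassNames: CamelCase
+  """Routes requests to handlers (docstring required, 2-space indent)."""
+
+  def handle_request(self, request_id: str) -> bool:  # snake_case
+    """Returns True if the request id parses as a hex number."""
+    try:
+      return int(request_id, 16) >= 0
+    except ValueError:  # Catch specific exceptions, not bare `Exception`.
+      return False
+```
+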
+### Autoformat
+
+We provide `autoformat.sh` to fix import ordering and formatting issues.
+
+```bash
+# Run in open_source_workspace/
+$ ./autoformat.sh
+```
+
+### In ADK source
+
+The styles below apply to the ADK source code (under the `src/` folder of the
+GitHub repo).
+
+#### Use relative imports
+
+```python
+# DO
+from ..agents.llm_agent import LlmAgent
+
+# DON'T
+from google.adk.agents.llm_agent import LlmAgent
+```
+
+#### Import from module, not from `__init__.py`
+
+```python
+# DO
+from ..agents.llm_agent import LlmAgent
+
+# DON'T
+from ..agents import LlmAgent # import from agents/__init__.py
+```
+
+#### Always do `from __future__ import annotations`
+
+```python
+# DO THIS, right after the open-source header.
+from __future__ import annotations
+```
+
+Like below:
+
+```python
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+# ... the rest of the file.
+```
+
+This allows us to forward-reference a class without quotes.
+
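+For example (a hypothetical class, shown only to illustrate the effect):
+
+```python
+from __future__ import annotations
+
+
+class Node:
+
+  def copy(self) -> Node:  # No quotes needed around "Node".
+    return Node()
+```
+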
+See PEP 563 (https://peps.python.org/pep-0563/) for details.
+
+### In ADK tests
+
+#### Use absolute imports
+
+In tests, import from `google.adk` the same way our users do.
+
+```python
+# DO
+from google.adk.agents.llm_agent import LlmAgent
+
+# DON'T
+from ..agents.llm_agent import LlmAgent
+```
+
+## ADK: Local testing
+
+### Unit tests
+
+Run the command below:
+
+```bash
+$ pytest tests/unittests
+```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04740bb7a..b6bba2692 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,84 @@
# Changelog
+## [1.5.0](https://github.com/google/adk-python/compare/v1.4.2...v1.5.0) (2025-06-25)
+
+
+### Features
+
+* Add a new option `eval_storage_uri` in adk web & adk eval to specify GCS bucket to store eval data ([fa025d7](https://github.com/google/adk-python/commit/fa025d755978e1506fa0da1fecc49775bebc1045))
+* Add ADK examples for litellm with add_function_to_prompt ([f33e090](https://github.com/google/adk-python/commit/f33e0903b21b752168db3006dd034d7d43f7e84d))
+* Add implementation of VertexAiMemoryBankService and support in FastAPI endpoint ([abc89d2](https://github.com/google/adk-python/commit/abc89d2c811ba00805f81b27a3a07d56bdf55a0b))
+* Add rouge_score library to ADK eval dependencies, and implement RougeEvaluator that computes ROUGE-1 for the "response_match_score" metric ([9597a44](https://github.com/google/adk-python/commit/9597a446fdec63ad9e4c2692d6966b14f80ff8e2))
+* Add usage span attributes to telemetry ([#356](https://github.com/google/adk-python/issues/356)) ([ea69c90](https://github.com/google/adk-python/commit/ea69c9093a16489afdf72657136c96f61c69cafd))
+* Add Vertex Express mode compatibility for VertexAiSessionService ([00cc8cd](https://github.com/google/adk-python/commit/00cc8cd6433fc45ecfc2dbaa04dbbc1a81213b4d))
+
+
+### Bug Fixes
+
+* Include current turn context when include_contents='none' ([9e473e0](https://github.com/google/adk-python/commit/9e473e0abdded24e710fd857782356c15d04b515))
+* Make LiteLLM streaming truly asynchronous ([bd67e84](https://github.com/google/adk-python/commit/bd67e8480f6e8b4b0f8c22b94f15a8cda1336339))
+* Make raw_auth_credential and exchanged_auth_credential optional given their default value is None ([acbdca0](https://github.com/google/adk-python/commit/acbdca0d8400e292ba5525931175e0d6feab15f1))
+* Minor typo fix in the agent instruction ([ef3c745](https://github.com/google/adk-python/commit/ef3c745d655538ebd1ed735671be615f842341a8))
+* Typo fix in sample agent instruction ([ef3c745](https://github.com/google/adk-python/commit/ef3c745d655538ebd1ed735671be615f842341a8))
+* Update contributing links ([a1e1441](https://github.com/google/adk-python/commit/a1e14411159fd9f3e114e15b39b4949d0fd6ecb1))
+* Use starred tuple unpacking on GCS artifact blob names ([3b1d9a8](https://github.com/google/adk-python/commit/3b1d9a8a3e631ca2d86d30f09640497f1728986c))
+
+
+### Chore
+
+* Do not send api request when session does not have events ([88a4402](https://github.com/google/adk-python/commit/88a4402d142672171d0a8ceae74671f47fa14289))
+* Leverage official uv action for install ([09f1269](https://github.com/google/adk-python/commit/09f1269bf7fa46ab4b9324e7f92b4f70ffc923e5))
+* Update google-genai package and related deps to latest ([ed7a21e](https://github.com/google/adk-python/commit/ed7a21e1890466fcdf04f7025775305dc71f603d))
+* Add credential service backed by session state ([29cd183](https://github.com/google/adk-python/commit/29cd183aa1b47dc4f5d8afe22f410f8546634abc))
+* Clarify the behavior of Event.invocation_id ([f033e40](https://github.com/google/adk-python/commit/f033e405c10ff8d86550d1419a9d63c0099182f9))
+* Send user message to the agent that returned a corresponding function call if user message is a function response ([7c670f6](https://github.com/google/adk-python/commit/7c670f638bc17374ceb08740bdd057e55c9c2e12))
+* Add request converter to convert a2a request to ADK request ([fb13963](https://github.com/google/adk-python/commit/fb13963deda0ff0650ac27771711ea0411474bf5))
+* Support allow_origins in cloud_run deployment ([2fd8feb](https://github.com/google/adk-python/commit/2fd8feb65d6ae59732fb3ec0652d5650f47132cc))
+
+## [1.4.2](https://github.com/google/adk-python/compare/v1.4.1...v1.4.2) (2025-06-20)
+
+
+### Bug Fixes
+
+* Add type checking to handle different response type of genai API client ([4d72d31](https://github.com/google/adk-python/commit/4d72d31b13f352245baa72b78502206dcbe25406))
+ * This fixes the broken VertexAiSessionService
+* Allow more credentials types for BigQuery tools ([2f716ad](https://github.com/google/adk-python/commit/2f716ada7fbcf8e03ff5ae16ce26a80ca6fd7bf6))
+
+## [1.4.1](https://github.com/google/adk-python/compare/v1.3.0...v1.4.1) (2025-06-18)
+
+
+### Features
+
+* Add Authenticated Tool (Experimental) ([dcea776](https://github.com/google/adk-python/commit/dcea7767c67c7edfb694304df32dca10b74c9a71))
+* Add enable_affective_dialog and proactivity to run_config and llm_request ([fe1d5aa](https://github.com/google/adk-python/commit/fe1d5aa439cc56b89d248a52556c0a9b4cbd15e4))
+* Add import session API in the fast API ([233fd20](https://github.com/google/adk-python/commit/233fd2024346abd7f89a16c444de0cf26da5c1a1))
+* Add integration tests for litellm with and without turning on add_function_to_prompt ([8e28587](https://github.com/google/adk-python/commit/8e285874da7f5188ea228eb4d7262dbb33b1ae6f))
+* Allow data_store_specs pass into ADK VAIS built-in tool ([675faef](https://github.com/google/adk-python/commit/675faefc670b5cd41991939fe0fc604df331111a))
+* Enable MCP Tool Auth (Experimental) ([157d9be](https://github.com/google/adk-python/commit/157d9be88d92f22320604832e5a334a6eb81e4af))
+* Implement GcsEvalSetResultsManager to handle storage of eval sets on GCS, and refactor eval set results manager ([0a5cf45](https://github.com/google/adk-python/commit/0a5cf45a75aca7b0322136b65ca5504a0c3c7362))
+* Re-factor some eval sets manager logic, and implement GcsEvalSetsManager to handle storage of eval sets on GCS ([1551bd4](https://github.com/google/adk-python/commit/1551bd4f4d7042fffb497d9308b05f92d45d818f))
+* Support real time input config ([d22920b](https://github.com/google/adk-python/commit/d22920bd7f827461afd649601326b0c58aea6716))
+* Support refresh access token automatically for rest_api_tool ([1779801](https://github.com/google/adk-python/commit/177980106b2f7be9a8c0a02f395ff0f85faa0c5a))
+
+### Bug Fixes
+
+* Fix Agent generate config err ([#1305](https://github.com/google/adk-python/issues/1305)) ([badbcbd](https://github.com/google/adk-python/commit/badbcbd7a464e6b323cf3164d2bcd4e27cbc057f))
+* Fix Agent generate config error ([#1450](https://github.com/google/adk-python/issues/1450)) ([694b712](https://github.com/google/adk-python/commit/694b71256c631d44bb4c4488279ea91d82f43e26))
+* Fix liteLLM test failures ([fef8778](https://github.com/google/adk-python/commit/fef87784297b806914de307f48c51d83f977298f))
+* Fix tracing for live ([58e07ca](https://github.com/google/adk-python/commit/58e07cae83048d5213d822be5197a96be9ce2950))
+* Merge custom http options with adk specific http options in model api request ([4ccda99](https://github.com/google/adk-python/commit/4ccda99e8ec7aa715399b4b83c3f101c299a95e8))
+* Remove unnecessary double quote on Claude docstring ([bbceb4f](https://github.com/google/adk-python/commit/bbceb4f2e89f720533b99cf356c532024a120dc4))
+* Set explicit project in the BigQuery client ([6d174eb](https://github.com/google/adk-python/commit/6d174eba305a51fcf2122c0fd481378752d690ef))
+* Support streaming in litellm + adk and add corresponding integration tests ([aafa80b](https://github.com/google/adk-python/commit/aafa80bd85a49fb1c1a255ac797587cffd3fa567))
+* Support project-based gemini model path to use google_search_tool ([b2fc774](https://github.com/google/adk-python/commit/b2fc7740b363a4e33ec99c7377f396f5cee40b5a))
+* Update conversion between Celsius and Fahrenheit ([1ae176a](https://github.com/google/adk-python/commit/1ae176ad2fa2b691714ac979aec21f1cf7d35e45))
+
+### Chores
+
+* Set `agent_engine_id` in the VertexAiSessionService constructor, also use the `agent_engine_id` field instead of overriding `app_name` in FastAPI endpoint ([fc65873](https://github.com/google/adk-python/commit/fc65873d7c31be607f6cd6690f142a031631582a))
+
+
+
## [1.3.0](https://github.com/google/adk-python/compare/v1.2.1...v1.3.0) (2025-06-11)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c0f3d0069..733f1143b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -49,6 +49,7 @@ This project follows
## Requirement for PRs
+- Each PR should only have one commit. Please squash your commits if there are multiple.
- All PRs, other than small documentation or typo fixes, should have an associated Issue. If not, please create one.
- Small, focused PRs. Keep changes minimal: one concern per PR.
- For bug fixes or features, please provide logs or screenshots after the fix is applied to help reviewers better understand the fix.
@@ -147,11 +148,11 @@ For any changes that impact user-facing documentation (guides, API reference, tu
pytest ./tests/unittests
```
- NOTE: for accurately repro test failure, only include `test` and `eval` as
- extra dependencies.
+ NOTE: for an accurate repro of test failures, only include `test`, `eval` and
+ `a2a` as extra dependencies.
```shell
- uv sync --extra test --extra eval
+ uv sync --extra test --extra eval --extra a2a
pytest ./tests/unittests
```
@@ -200,7 +201,7 @@ For any changes that impact user-facing documentation (guides, API reference, tu
## Contributing Resources
-[Contributing folder](https://github.com/google/adk-python/tree/main/contributing/samples) has resources that is helpful for contributors.
+[Contributing folder](https://github.com/google/adk-python/tree/main/contributing) has resources that are helpful for contributors.
## Code reviews
diff --git a/README.md b/README.md
index 7bd5e7401..874658d07 100644
--- a/README.md
+++ b/README.md
@@ -135,7 +135,7 @@ adk eval \
## 🤝 Contributing
We welcome contributions from the community! Whether it's bug reports, feature requests, documentation improvements, or code contributions, please see our
-- [General contribution guideline and flow](https://google.github.io/adk-docs/contributing-guide/#questions).
+- [General contribution guideline and flow](https://google.github.io/adk-docs/contributing-guide/).
- Then if you want to contribute code, please read [Code Contributing Guidelines](./CONTRIBUTING.md) to get started.
## 📄 License
diff --git a/contributing/samples/a2a_auth/README.md b/contributing/samples/a2a_auth/README.md
new file mode 100644
index 000000000..00382661c
--- /dev/null
+++ b/contributing/samples/a2a_auth/README.md
@@ -0,0 +1,183 @@
+# A2A OAuth Authentication Sample Agent
+
+This sample demonstrates the **Agent-to-Agent (A2A)** architecture with **OAuth Authentication** workflows in the Agent Development Kit (ADK). The sample implements a multi-agent system where a remote agent can surface OAuth authentication requests to the local agent, which then guides the end user through the OAuth flow before returning the authentication credentials to the remote agent for API access.
+
+## Overview
+
+The A2A OAuth Authentication sample consists of:
+
+- **Root Agent** (`root_agent`): The main orchestrator that handles user requests and delegates tasks to specialized agents
+- **YouTube Search Agent** (`youtube_search_agent`): A local agent that handles YouTube video searches using LangChain tools
+- **BigQuery Agent** (`bigquery_agent`): A remote A2A agent that manages BigQuery operations and requires OAuth authentication for Google Cloud access
+
+## Architecture
+
+```
+┌─────────────────┐    ┌──────────────────┐    ┌──────────────────┐
+│     End User    │───▶│    Root Agent    │───▶│  BigQuery Agent  │
+│   (OAuth Flow)  │    │     (Local)      │    │   (Remote A2A)   │
+│                 │    │                  │    │ (localhost:8001) │
+│    OAuth UI     │◀───│                  │◀───│  OAuth Request   │
+└─────────────────┘    └──────────────────┘    └──────────────────┘
+```
+
+## Key Features
+
+### 1. **Multi-Agent Architecture**
+- Root agent coordinates between local YouTube search and remote BigQuery operations
+- Demonstrates hybrid local/remote agent workflows
+- Seamless task delegation based on user request types
+
+### 2. **OAuth Authentication Workflow**
+- Remote BigQuery agent surfaces OAuth authentication requests to the root agent
+- Root agent guides end users through Google OAuth flow for BigQuery access
+- Secure token exchange between agents for authenticated API calls
+
+### 3. **Google Cloud Integration**
+- BigQuery toolset with comprehensive dataset and table management capabilities
+- OAuth-protected access to user's Google Cloud BigQuery resources
+- Support for listing, creating, and managing datasets and tables
+
+### 4. **LangChain Tool Integration**
+- YouTube search functionality using LangChain community tools
+- Demonstrates integration of third-party tools in agent workflows
+
+## Setup and Usage
+
+### Prerequisites
+
+1. **Set up OAuth Credentials**:
+ ```bash
+ export OAUTH_CLIENT_ID=your_google_oauth_client_id
+ export OAUTH_CLIENT_SECRET=your_google_oauth_client_secret
+ ```
+
+2. **Start the Remote BigQuery Agent server**:
+ ```bash
+ # Start the remote a2a server that serves the BigQuery agent on port 8001
+ adk api_server --a2a --port 8001 contributing/samples/a2a_auth/remote_a2a
+ ```
+
+3. **Run the Main Agent**:
+ ```bash
+ # In a separate terminal, run the adk web server
+ adk web contributing/samples/
+ ```
+
+### Example Interactions
+
+Once both services are running, you can interact with the root agent:
+
+**YouTube Search (No Authentication Required):**
+```
+User: Search for 3 Taylor Swift music videos
+Agent: I'll help you search for Taylor Swift music videos on YouTube.
+[Agent delegates to YouTube Search Agent]
+Agent: I found 3 Taylor Swift music videos:
+1. "Anti-Hero" - Official Music Video
+2. "Shake It Off" - Official Music Video
+3. "Blank Space" - Official Music Video
+```
+
+**BigQuery Operations (OAuth Required):**
+```
+User: List my BigQuery datasets
+Agent: I'll help you access your BigQuery datasets. This requires authentication with your Google account.
+[Agent delegates to BigQuery Agent]
+Agent: To access your BigQuery data, please complete the OAuth authentication.
+[OAuth flow initiated - user redirected to Google authentication]
+User: [Completes OAuth flow in browser]
+Agent: Authentication successful! Here are your BigQuery datasets:
+- dataset_1: Customer Analytics
+- dataset_2: Sales Data
+- dataset_3: Marketing Metrics
+```
+
+**Dataset Management:**
+```
+User: Show me details for my Customer Analytics dataset
+Agent: I'll get the details for your Customer Analytics dataset.
+[Using existing OAuth token]
+Agent: Customer Analytics Dataset Details:
+- Created: 2024-01-15
+- Location: US
+- Tables: 5
+- Description: Customer behavior and analytics data
+```
+
+## Code Structure
+
+### Main Agent (`agent.py`)
+
+- **`youtube_search_agent`**: Local agent with LangChain YouTube search tool
+- **`bigquery_agent`**: Remote A2A agent configuration for BigQuery operations
+- **`root_agent`**: Main orchestrator with task delegation logic
+
+### Remote BigQuery Agent (`remote_a2a/bigquery_agent/`)
+
+- **`agent.py`**: Implementation of the BigQuery agent with OAuth toolset
+- **`agent.json`**: Agent card of the A2A agent
+- **`BigQueryToolset`**: OAuth-enabled tools for BigQuery dataset and table management
+
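+For reference, the OAuth client is wired into the remote agent roughly as
+follows (a minimal sketch assuming the `BigQueryCredentialsConfig` and
+`BigQueryToolset` helpers from `google.adk.tools.bigquery`; see
+`remote_a2a/bigquery_agent/agent.py` for the actual implementation):
+
+```python
+import os
+
+from google.adk.agents import Agent
+from google.adk.tools.bigquery import BigQueryCredentialsConfig
+from google.adk.tools.bigquery import BigQueryToolset
+
+# OAuth client built from the environment variables set in the Prerequisites.
+credentials_config = BigQueryCredentialsConfig(
+    client_id=os.getenv("OAUTH_CLIENT_ID"),
+    client_secret=os.getenv("OAUTH_CLIENT_SECRET"),
+)
+
+bigquery_toolset = BigQueryToolset(credentials_config=credentials_config)
+
+root_agent = Agent(
+    model="gemini-2.0-flash",
+    name="bigquery_agent",
+    instruction="Help the user manage their BigQuery datasets and tables.",
+    tools=[bigquery_toolset],
+)
+```
+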
+## OAuth Authentication Workflow
+
+The OAuth authentication process follows this pattern:
+
+1. **Initial Request**: User requests BigQuery operation through root agent
+2. **Delegation**: Root agent delegates to remote BigQuery agent
+3. **Auth Check**: BigQuery agent checks for valid OAuth token
+4. **Auth Request**: If no token, agent surfaces OAuth request to root agent
+5. **User OAuth**: Root agent guides user through Google OAuth flow
+6. **Token Exchange**: Root agent sends OAuth token to BigQuery agent
+7. **API Call**: BigQuery agent uses token to make authenticated API calls
+8. **Result Return**: BigQuery agent returns results through root agent to user
+
+## Supported BigQuery Operations
+
+The BigQuery agent supports the following operations:
+
+### Dataset Operations:
+- **List Datasets**: `bigquery_datasets_list` - Get all user's datasets
+- **Get Dataset**: `bigquery_datasets_get` - Get specific dataset details
+- **Create Dataset**: `bigquery_datasets_insert` - Create new dataset
+
+### Table Operations:
+- **List Tables**: `bigquery_tables_list` - Get tables in a dataset
+- **Get Table**: `bigquery_tables_get` - Get specific table details
+- **Create Table**: `bigquery_tables_insert` - Create new table in dataset
+
+## Extending the Sample
+
+You can extend this sample by:
+
+- Adding more Google Cloud services (Cloud Storage, Compute Engine, etc.)
+- Implementing token refresh and expiration handling
+- Adding role-based access control for different BigQuery operations
+- Creating OAuth flows for other providers (Microsoft, Facebook, etc.)
+- Adding audit logging for authentication events
+- Implementing multi-tenant OAuth token management
+
+## Troubleshooting
+
+**Connection Issues:**
+- Ensure the local ADK web server is running on port 8000
+- Ensure the remote A2A server is running on port 8001
+- Check that no firewall is blocking localhost connections
+- Verify the agent.json URL matches the running A2A server
+
+**OAuth Issues:**
+- Verify OAuth client ID and secret are correctly set in .env file
+- Ensure OAuth redirect URIs are properly configured in Google Cloud Console
+- Check that the OAuth scopes include BigQuery access permissions
+- Verify the user has access to the BigQuery projects/datasets
+
+**BigQuery Access Issues:**
+- Ensure the authenticated user has BigQuery permissions
+- Check that the Google Cloud project has BigQuery API enabled
+- Verify dataset and table names are correct and accessible
+- Check for quota limits on BigQuery API calls
+
+**Agent Communication Issues:**
+- Check the logs for both the local ADK web server and remote A2A server
+- Verify OAuth tokens are properly passed between agents
+- Ensure agent instructions are clear about authentication requirements
diff --git a/contributing/samples/bigquery_agent/__init__.py b/contributing/samples/a2a_auth/__init__.py
similarity index 100%
rename from contributing/samples/bigquery_agent/__init__.py
rename to contributing/samples/a2a_auth/__init__.py
diff --git a/contributing/samples/a2a_auth/agent.py b/contributing/samples/a2a_auth/agent.py
new file mode 100644
index 000000000..f914d0c0a
--- /dev/null
+++ b/contributing/samples/a2a_auth/agent.py
@@ -0,0 +1,62 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.adk.agents import Agent
+from google.adk.agents.remote_a2a_agent import RemoteA2aAgent
+from google.adk.tools.langchain_tool import LangchainTool
+from langchain_community.tools import YouTubeSearchTool
+
+# Instantiate the tool
+langchain_yt_tool = YouTubeSearchTool()
+
+# Wrap the tool in the LangchainTool class from ADK
+adk_yt_tool = LangchainTool(
+ tool=langchain_yt_tool,
+)
+
+youtube_search_agent = Agent(
+ name="youtube_search_agent",
+ model="gemini-2.0-flash", # Replace with the actual model name
+ instruction="""
+ Ask customer to provide singer name, and the number of videos to search.
+ """,
+ description="Help customer to search for a video on Youtube.",
+ tools=[adk_yt_tool],
+ output_key="youtube_search_output",
+)
+
+bigquery_agent = RemoteA2aAgent(
+ name="bigquery_agent",
+ description="Help customer to manage notion workspace.",
+ agent_card=(
+ "http://localhost:8001/a2a/bigquery_agent/.well-known/agent.json"
+ ),
+)
+
+root_agent = Agent(
+ model="gemini-2.0-flash",
+ name="root_agent",
+ instruction="""
+ You are a helpful assistant that can help search youtube videos, look up BigQuery datasets and tables.
+ You delegate youtube search tasks to the youtube_search_agent.
+ You delegate BigQuery tasks to the bigquery_agent.
+ Always clarify the results before proceeding.
+ """,
+ global_instruction=(
+ "You are a helpful assistant that can help search youtube videos, look"
+ " up BigQuery datasets and tables."
+ ),
+ sub_agents=[youtube_search_agent, bigquery_agent],
+)
diff --git a/contributing/samples/live_bidi_streaming_agent/__init__.py b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from contributing/samples/live_bidi_streaming_agent/__init__.py
rename to contributing/samples/a2a_auth/remote_a2a/bigquery_agent/__init__.py
diff --git a/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.json b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.json
new file mode 100644
index 000000000..2e11e74fa
--- /dev/null
+++ b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.json
@@ -0,0 +1,29 @@
+{
+ "capabilities": {},
+ "defaultInputModes": ["text/plain"],
+ "defaultOutputModes": ["application/json"],
+ "description": "A Google BigQuery agent that helps manage users' data on Google BigQuery. Can list, get, and create datasets, as well as manage tables within datasets. Supports OAuth authentication for secure access to BigQuery resources.",
+ "name": "bigquery_agent",
+ "skills": [
+ {
+ "id": "dataset_management",
+ "name": "Dataset Management",
+ "description": "List, get details, and create BigQuery datasets",
+ "tags": ["bigquery", "datasets", "google-cloud"]
+ },
+ {
+ "id": "table_management",
+ "name": "Table Management",
+ "description": "List, get details, and create BigQuery tables within datasets",
+ "tags": ["bigquery", "tables", "google-cloud"]
+ },
+ {
+ "id": "oauth_authentication",
+ "name": "OAuth Authentication",
+ "description": "Secure authentication with Google BigQuery using OAuth",
+ "tags": ["authentication", "oauth", "security"]
+ }
+ ],
+ "url": "http://localhost:8000/a2a/bigquery_agent",
+ "version": "1.0.0"
+}
diff --git a/contributing/samples/bigquery_agent/agent.py b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py
similarity index 100%
rename from contributing/samples/bigquery_agent/agent.py
rename to contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py
diff --git a/contributing/samples/a2a_basic/README.md b/contributing/samples/a2a_basic/README.md
new file mode 100644
index 000000000..d2bc84b0c
--- /dev/null
+++ b/contributing/samples/a2a_basic/README.md
@@ -0,0 +1,120 @@
+# A2A Basic Sample Agent
+
+This sample demonstrates the **Agent-to-Agent (A2A)** architecture in the Agent Development Kit (ADK), showcasing how multiple agents can work together to handle complex tasks. The sample implements an agent that can roll dice and check if numbers are prime.
+
+## Overview
+
+The A2A Basic sample consists of:
+
+- **Root Agent** (`root_agent`): The main orchestrator that delegates tasks to specialized sub-agents
+- **Roll Agent** (`roll_agent`): A local sub-agent that handles dice rolling operations
+- **Prime Agent** (`prime_agent`): A remote A2A agent that checks if numbers are prime; this agent runs on a separate A2A server
+
+## Architecture
+
+```
+┌─────────────────┐    ┌──────────────────┐    ┌────────────────────┐
+│   Root Agent    │───▶│    Roll Agent    │    │    Remote Prime    │
+│    (Local)      │    │     (Local)      │    │       Agent        │
+│                 │    │                  │    │  (localhost:8001)  │
+│                 │───▶│                  │◀───│                    │
+└─────────────────┘    └──────────────────┘    └────────────────────┘
+```
+
+## Key Features
+
+### 1. **Local Sub-Agent Integration**
+- The `roll_agent` demonstrates how to create and integrate local sub-agents
+- Handles dice rolling with configurable number of sides
+- Uses a simple function tool (`roll_die`) for random number generation
+
+### 2. **Remote A2A Agent Integration**
+- The `prime_agent` shows how to connect to remote agent services
+- Communicates with a separate service via HTTP at `http://localhost:8001/a2a/check_prime_agent`
+- Demonstrates cross-service agent communication
+
+### 3. **Agent Orchestration**
+- The root agent intelligently delegates tasks based on user requests
+- Can chain operations (e.g., "roll a die and check if it's prime")
+- Provides clear workflow coordination between multiple agents
+
+### 4. **Example Tool Integration**
+- Includes an `ExampleTool` with sample interactions for context
+- Helps the agent understand expected behavior patterns
+
+## Setup and Usage
+
+### Prerequisites
+
+1. **Start the Remote Prime Agent server**:
+ ```bash
+ # Start the remote a2a server that serves the check prime agent on port 8001
+ adk api_server --a2a --port 8001 contributing/samples/a2a_basic/remote_a2a
+ ```
+
+2. **Run the Main Agent**:
+ ```bash
+ # In a separate terminal, run the adk web server
+ adk web contributing/samples/
+ ```
+
+### Example Interactions
+
+Once both services are running, you can interact with the root agent:
+
+**Simple Dice Rolling:**
+```
+User: Roll a 6-sided die
+Bot: I rolled a 4 for you.
+```
+
+**Prime Number Checking:**
+```
+User: Is 7 a prime number?
+Bot: Yes, 7 is a prime number.
+```
+
+**Combined Operations:**
+```
+User: Roll a 10-sided die and check if it's prime
+Bot: I rolled an 8 for you.
+Bot: 8 is not a prime number.
+```
+
+## Code Structure
+
+### Main Agent (`agent.py`)
+
+- **`roll_die(sides: int)`**: Function tool for rolling dice
+- **`roll_agent`**: Local agent specialized in dice rolling
+- **`prime_agent`**: Remote A2A agent configuration
+- **`root_agent`**: Main orchestrator with delegation logic
+
+### Remote Prime Agent (`remote_a2a/check_prime_agent/`)
+
+- **`agent.py`**: Implementation of the prime checking service
+- **`agent.json`**: Agent card of the A2A agent
+- **`check_prime(nums: list[int])`**: Prime number checking algorithm
+
+
+## Extending the Sample
+
+You can extend this sample by:
+
+- Adding more mathematical operations (factorization, square roots, etc.)
+- Creating additional remote agents
+- Implementing more complex delegation logic
+- Adding persistent state management
+- Integrating with external APIs or databases
+
+## Troubleshooting
+
+**Connection Issues:**
+- Ensure the local ADK web server is running on port 8000
+- Ensure the remote A2A server is running on port 8001
+- Check that no firewall is blocking localhost connections
+- Verify the agent.json URL matches the running A2A server
+
+**Agent Not Responding:**
+- Check the logs for both the local ADK web server on port 8000 and remote A2A server on port 8001
+- Verify the agent instructions are clear and unambiguous
diff --git a/contributing/samples/a2a_basic/__init__.py b/contributing/samples/a2a_basic/__init__.py
new file mode 100755
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/a2a_basic/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/a2a_basic/agent.py b/contributing/samples/a2a_basic/agent.py
new file mode 100755
index 000000000..275511f4d
--- /dev/null
+++ b/contributing/samples/a2a_basic/agent.py
@@ -0,0 +1,120 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+from google.adk.agents import Agent
+from google.adk.agents.remote_a2a_agent import RemoteA2aAgent
+from google.adk.tools.example_tool import ExampleTool
+from google.genai import types
+
+
+# --- Roll Die Sub-Agent ---
+def roll_die(sides: int) -> int:
+ """Roll a die and return the rolled result."""
+ return random.randint(1, sides)
+
+
+roll_agent = Agent(
+ name="roll_agent",
+ description="Handles rolling dice of different sizes.",
+ instruction="""
+ You are responsible for rolling dice based on the user's request.
+ When asked to roll a die, you must call the roll_die tool with the number of sides as an integer.
+ """,
+ tools=[roll_die],
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
+
+
+example_tool = ExampleTool([
+ {
+ "input": {
+ "role": "user",
+ "parts": [{"text": "Roll a 6-sided die."}],
+ },
+ "output": [
+ {"role": "model", "parts": [{"text": "I rolled a 4 for you."}]}
+ ],
+ },
+ {
+ "input": {
+ "role": "user",
+ "parts": [{"text": "Is 7 a prime number?"}],
+ },
+ "output": [{
+ "role": "model",
+ "parts": [{"text": "Yes, 7 is a prime number."}],
+ }],
+ },
+ {
+ "input": {
+ "role": "user",
+ "parts": [{"text": "Roll a 10-sided die and check if it's prime."}],
+ },
+ "output": [
+ {
+ "role": "model",
+ "parts": [{"text": "I rolled an 8 for you."}],
+ },
+ {
+ "role": "model",
+ "parts": [{"text": "8 is not a prime number."}],
+ },
+ ],
+ },
+])
+
+prime_agent = RemoteA2aAgent(
+ name="prime_agent",
+ description="Agent that handles checking if numbers are prime.",
+ agent_card=(
+ "http://localhost:8001/a2a/check_prime_agent/.well-known/agent.json"
+ ),
+)
+
+
+root_agent = Agent(
+ model="gemini-1.5-flash",
+ name="root_agent",
+ instruction="""
+ You are a helpful assistant that can roll dice and check if numbers are prime.
+ You delegate rolling dice tasks to the roll_agent and prime checking tasks to the prime_agent.
+ Follow these steps:
+ 1. If the user asks to roll a die, delegate to the roll_agent.
+ 2. If the user asks to check primes, delegate to the prime_agent.
+ 3. If the user asks to roll a die and then check if the result is prime, call roll_agent first, then pass the result to prime_agent.
+ Always clarify the results before proceeding.
+ """,
+ global_instruction=(
+ "You are DicePrimeBot, ready to roll dice and check prime numbers."
+ ),
+ sub_agents=[roll_agent, prime_agent],
+ tools=[example_tool],
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
diff --git a/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/__init__.py b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/__init__.py
new file mode 100755
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.json b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.json
new file mode 100644
index 000000000..e625bc343
--- /dev/null
+++ b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.json
@@ -0,0 +1,17 @@
+{
+ "capabilities": {},
+ "defaultInputModes": ["text/plain"],
+ "defaultOutputModes": ["application/json"],
+ "description": "An agent specialized in checking whether numbers are prime. It can efficiently determine the primality of individual numbers or lists of numbers.",
+ "name": "check_prime_agent",
+ "skills": [
+ {
+ "id": "prime_checking",
+ "name": "Prime Number Checking",
+ "description": "Check if numbers in a list are prime using efficient mathematical algorithms",
+ "tags": ["mathematical", "computation", "prime", "numbers"]
+ }
+ ],
+ "url": "http://localhost:8001/a2a/check_prime_agent",
+ "version": "1.0.0"
+}
diff --git a/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.py b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.py
new file mode 100755
index 000000000..1a7cd5565
--- /dev/null
+++ b/contributing/samples/a2a_basic/remote_a2a/check_prime_agent/agent.py
@@ -0,0 +1,75 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+from google.adk import Agent
+from google.adk.tools.tool_context import ToolContext
+from google.genai import types
+
+
+async def check_prime(nums: list[int]) -> str:
+ """Check if a given list of numbers are prime.
+
+ Args:
+ nums: The list of numbers to check.
+
+ Returns:
+ A str indicating which number is prime.
+ """
+ primes = set()
+ for number in nums:
+ number = int(number)
+ if number <= 1:
+ continue
+ is_prime = True
+ for i in range(2, int(number**0.5) + 1):
+ if number % i == 0:
+ is_prime = False
+ break
+ if is_prime:
+ primes.add(number)
+ return (
+ 'No prime numbers found.'
+ if not primes
+ else f"{', '.join(str(num) for num in primes)} are prime numbers."
+ )
+
+
+root_agent = Agent(
+ model='gemini-2.0-flash',
+ name='check_prime_agent',
+ description='check prime agent that can check whether numbers are prime.',
+ instruction="""
+ You check whether numbers are prime.
+ When checking prime numbers, call the check_prime tool with a list of integers. Be sure to pass in a list of integers. You should never pass in a string.
+ You should not rely on the previous history on prime results.
+ """,
+ tools=[
+ check_prime,
+ ],
+ # planner=BuiltInPlanner(
+ # thinking_config=types.ThinkingConfig(
+ # include_thoughts=True,
+ # ),
+ # ),
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
diff --git a/contributing/samples/a2a_human_in_loop/README.md b/contributing/samples/a2a_human_in_loop/README.md
new file mode 100644
index 000000000..9cbce9c90
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/README.md
@@ -0,0 +1,135 @@
+# A2A Human-in-the-Loop Sample Agent
+
+This sample demonstrates the **Agent-to-Agent (A2A)** architecture with **Human-in-the-Loop** workflows in the Agent Development Kit (ADK). The sample implements a reimbursement processing agent that automatically handles small expenses and delegates larger amounts to a remote agent. The remote agent requires human approval for large amounts, so it surfaces the request to the local agent, where a human interacting with the local agent can approve it.
+
+## Overview
+
+The A2A Human-in-the-Loop sample consists of:
+
+- **Root Agent** (`root_agent`): The main reimbursement agent that handles expense requests and delegates approval to remote Approval Agent for large amounts
+- **Approval Agent** (`approval_agent`): A remote A2A agent that handles the human approval process via long-running tools, which implement asynchronous approval workflows that can pause execution and wait for human input; this agent runs on a separate A2A server
+
+
+## Architecture
+
+```
+┌─────────────────┐    ┌────────────────────┐    ┌──────────────────┐
+│  Human Manager  │───▶│     Root Agent     │───▶│  Approval Agent  │
+│   (External)    │    │      (Local)       │    │   (Remote A2A)   │
+│                 │    │                    │    │ (localhost:8001) │
+│   Approval UI   │◀───│                    │◀───│                  │
+└─────────────────┘    └────────────────────┘    └──────────────────┘
+```
+
+## Key Features
+
+### 1. **Automated Decision Making**
+- Automatically approves reimbursements under $100
+- Uses business logic to determine when human intervention is required
+- Provides immediate responses for simple cases
+
+### 2. **Human-in-the-Loop Workflow**
+- Seamlessly escalates high-value requests (>$100) to remote approval agent
+- Remote approval agent uses long-running tools to surface approval requests back to the root agent
+- Human managers interact directly with the root agent to approve/reject requests
+
+### 3. **Long-Running Tool Integration**
+- Demonstrates `LongRunningFunctionTool` for asynchronous operations
+- Shows how to handle pending states and external updates
+- Implements proper tool response handling for delayed approvals
+
+### 4. **Remote A2A Agent Communication**
+- The approval agent runs as a separate service that processes approval workflows
+- Communicates via HTTP at `http://localhost:8001/a2a/human_in_loop`
+- Surfaces approval requests back to the root agent for human interaction
+
+## Setup and Usage
+
+### Prerequisites
+
+1. **Start the Remote Approval Agent server**:
+ ```bash
+ # Start the remote a2a server that serves the human-in-the-loop approval agent on port 8001
+ adk api_server --a2a --port 8001 contributing/samples/a2a_human_in_loop/remote_a2a
+ ```
+
+2. **Run the Main Agent**:
+ ```bash
+ # In a separate terminal, run the adk web server
+ adk web contributing/samples/
+ ```
+
+### Example Interactions
+
+Once both services are running, you can interact with the root agent through the approval workflow:
+
+**Automatic Approval (Under $100):**
+```
+User: Please reimburse $50 for meals
+Agent: I'll process your reimbursement request for $50 for meals. Since this amount is under $100, I can approve it automatically.
+Agent: ✅ Reimbursement approved and processed: $50 for meals
+```
+
+**Human Approval Required (Over $100):**
+```
+User: Please reimburse $200 for conference travel
+Agent: I'll process your reimbursement request for $200 for conference travel. Since this amount exceeds $100, I need to get manager approval.
+Agent: Request submitted for approval (Ticket: reimbursement-ticket-001). Please wait for manager review.
+[Human manager interacts with root agent to approve the request]
+Agent: ✅ Great news! Your reimbursement has been approved by the manager. Processing $200 for conference travel.
+```
+
+## Code Structure
+
+### Main Agent (`agent.py`)
+
+- **`reimburse(purpose: str, amount: float)`**: Function tool for processing reimbursements
+- **`approval_agent`**: Remote A2A agent configuration for human approval workflows
+- **`root_agent`**: Main reimbursement agent with automatic/manual approval logic
+
+### Remote Approval Agent (`remote_a2a/human_in_loop/`)
+
+- **`agent.py`**: Implementation of the approval agent with long-running tools
+- **`agent.json`**: Agent card of the A2A agent
+
+- **`ask_for_approval()`**: Long-running tool that handles approval requests
+
+## Long-Running Tool Workflow
+
+The human-in-the-loop process follows this pattern:
+
+1. **Initial Call**: Root agent delegates approval request to remote approval agent for amounts >$100
+2. **Pending Response**: Remote approval agent returns an immediate response with `status: "pending"` and a ticket ID, and surfaces the approval request to the root agent
+3. **Agent Acknowledgment**: Root agent informs user about pending approval status
+4. **Human Interaction**: Human manager interacts with root agent to review and approve/reject the request
+5. **Updated Response**: Root agent receives the updated tool response with the approval decision and sends it to the remote agent (see the sketch below)
+6. **Final Action**: Remote agent processes the approval, completes the reimbursement, and sends the result back to the root agent
+
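+When the human decision arrives, the updated tool response is sent back as a
+regular message whose function response reuses the `id` and `name` of the
+original long-running `ask_for_approval` call. A minimal sketch (field values
+are illustrative) using `google.genai` types:
+
+```python
+from google.genai import types
+
+# Reuse the id and name from the original ask_for_approval function call
+# (captured from the pending event emitted by the approval agent).
+updated_tool_response = types.Part(
+    function_response=types.FunctionResponse(
+        id="ask_for_approval_call_id",  # illustrative placeholder
+        name="ask_for_approval",
+        response={
+            "status": "approved",
+            "amount": 200.0,
+            "ticketId": "reimbursement-ticket-001",
+        },
+    )
+)
+
+new_message = types.Content(role="user", parts=[updated_tool_response])
+```
+
+The message is then sent through the same runner and session so the paused
+tool call can resume with the approval decision.
+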
+## Extending the Sample
+
+You can extend this sample by:
+
+- Adding more complex approval hierarchies (multiple approval levels)
+- Implementing different approval rules based on expense categories
+- Creating additional remote agents for budget checking or policy validation
+- Adding notification systems for approval status updates
+- Integrating with external approval systems or databases
+- Implementing approval timeouts and escalation procedures
+
+## Troubleshooting
+
+**Connection Issues:**
+- Ensure the local ADK web server is running on port 8000
+- Ensure the remote A2A server is running on port 8001
+- Check that no firewall is blocking localhost connections
+- Verify the agent.json URL matches the running A2A server
+
+**Agent Not Responding:**
+- Check the logs for both the local ADK web server on port 8000 and remote A2A server on port 8001
+- Verify the agent instructions are clear and unambiguous
+- Ensure long-running tool responses are properly formatted with matching IDs
+
+**Approval Workflow Issues:**
+- Verify that updated tool responses use the same `id` and `name` as the original function call
+- Check that the approval status is correctly updated in the tool response
+- Ensure the human approval process is properly simulated or integrated
diff --git a/contributing/samples/a2a_human_in_loop/__init__.py b/contributing/samples/a2a_human_in_loop/__init__.py
new file mode 100644
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/a2a_human_in_loop/agent.py b/contributing/samples/a2a_human_in_loop/agent.py
new file mode 100644
index 000000000..d15b5bb3f
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/agent.py
@@ -0,0 +1,49 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.adk import Agent
+from google.adk.agents.remote_a2a_agent import RemoteA2aAgent
+from google.genai import types
+
+
+def reimburse(purpose: str, amount: float) -> dict[str, str]:
+ """Reimburse the amount of money to the employee."""
+ return {
+ 'status': 'ok',
+ }
+
+
+approval_agent = RemoteA2aAgent(
+ name='approval_agent',
+ description='Help approve the reimbursement if the amount is greater than $100.',
+ agent_card='http://localhost:8001/a2a/human_in_loop/.well-known/agent.json',
+)
+
+
+root_agent = Agent(
+ model='gemini-1.5-flash',
+ name='reimbursement_agent',
+ instruction="""
+ You are an agent whose job is to handle the reimbursement process for
+ the employees. If the amount is less than $100, you will automatically
+ approve the reimbursement and call reimburse() to reimburse the amount to the employee.
+
+ If the amount is greater than $100, you will hand over the request to
+ approval_agent to handle the reimbursement.
+""",
+ tools=[reimburse],
+ sub_agents=[approval_agent],
+ generate_content_config=types.GenerateContentConfig(temperature=0.1),
+)
diff --git a/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/__init__.py b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/__init__.py
new file mode 100644
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.json b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.json
new file mode 100644
index 000000000..17153b7cf
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.json
@@ -0,0 +1,29 @@
+{
+ "capabilities": {},
+ "defaultInputModes": ["text/plain"],
+ "defaultOutputModes": ["application/json"],
+ "description": "A reimbursement agent that handles employee expense reimbursement requests. Automatically approves amounts under $100 and requires manager approval for larger amounts using long-running tools for human-in-the-loop workflows.",
+ "name": "reimbursement_agent",
+ "skills": [
+ {
+ "id": "automatic_reimbursement",
+ "name": "Automatic Reimbursement",
+ "description": "Automatically process and approve reimbursements under $100",
+ "tags": ["reimbursement", "automation", "finance"]
+ },
+ {
+ "id": "approval_workflow",
+ "name": "Approval Workflow",
+ "description": "Request manager approval for reimbursements over $100 using long-running tools",
+ "tags": ["approval", "workflow", "human-in-loop"]
+ },
+ {
+ "id": "expense_processing",
+ "name": "Expense Processing",
+ "description": "Process employee expense claims and handle reimbursement logic",
+ "tags": ["expenses", "processing", "employee-services"]
+ }
+ ],
+ "url": "http://localhost:8000/a2a/human_in_loop",
+ "version": "1.0.0"
+}
diff --git a/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.py b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.py
new file mode 100644
index 000000000..acf7e4567
--- /dev/null
+++ b/contributing/samples/a2a_human_in_loop/remote_a2a/human_in_loop/agent.py
@@ -0,0 +1,56 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any
+
+from google.adk import Agent
+from google.adk.tools import ToolContext
+from google.adk.tools.long_running_tool import LongRunningFunctionTool
+from google.genai import types
+
+
+def reimburse(purpose: str, amount: float) -> dict[str, Any]:
+ """Reimburse the amount of money to the employee."""
+ return {
+ 'status': 'ok',
+ }
+
+
+def ask_for_approval(
+ purpose: str, amount: float, tool_context: ToolContext
+) -> dict[str, Any]:
+ """Ask for approval for the reimbursement."""
+ return {
+ 'status': 'pending',
+ 'amount': amount,
+ 'ticketId': 'reimbursement-ticket-001',
+ }
+
+
+root_agent = Agent(
+ model='gemini-1.5-flash',
+ name='reimbursement_agent',
+ instruction="""
+ You are an agent whose job is to handle the reimbursement process for
+ the employees. If the amount is less than $100, you will automatically
+ approve the reimbursement.
+
+ If the amount is greater than $100, you will
+ ask for approval from the manager. If the manager approves, you will
+ call reimburse() to reimburse the amount to the employee. If the manager
+ rejects, you will inform the employee of the rejection.
+""",
+ tools=[reimburse, LongRunningFunctionTool(func=ask_for_approval)],
+ generate_content_config=types.GenerateContentConfig(temperature=0.1),
+)
diff --git a/contributing/samples/adk_issue_formatting_agent/__init__.py b/contributing/samples/adk_issue_formatting_agent/__init__.py
new file mode 100644
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/adk_issue_formatting_agent/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/adk_issue_formatting_agent/agent.py b/contributing/samples/adk_issue_formatting_agent/agent.py
new file mode 100644
index 000000000..78add9b83
--- /dev/null
+++ b/contributing/samples/adk_issue_formatting_agent/agent.py
@@ -0,0 +1,241 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import Any
+
+from adk_issue_formatting_agent.settings import GITHUB_BASE_URL
+from adk_issue_formatting_agent.settings import IS_INTERACTIVE
+from adk_issue_formatting_agent.settings import OWNER
+from adk_issue_formatting_agent.settings import REPO
+from adk_issue_formatting_agent.utils import error_response
+from adk_issue_formatting_agent.utils import get_request
+from adk_issue_formatting_agent.utils import post_request
+from adk_issue_formatting_agent.utils import read_file
+from google.adk import Agent
+import requests
+
+BUG_REPORT_TEMPLATE = read_file(
+ Path(__file__).parent / "../../../../.github/ISSUE_TEMPLATE/bug_report.md"
+)
+FEATURE_REQUEST_TEMPLATE = read_file(
+ Path(__file__).parent
+ / "../../../../.github/ISSUE_TEMPLATE/feature_request.md"
+)
+
+APPROVAL_INSTRUCTION = (
+ "**Do not** wait or ask for user approval or confirmation for adding the"
+ " comment."
+)
+if IS_INTERACTIVE:
+ APPROVAL_INSTRUCTION = (
+ "Ask for user approval or confirmation for adding the comment."
+ )
+
+
+def list_open_issues(issue_count: int) -> dict[str, Any]:
+ """List most recent `issue_count` numer of open issues in the repo.
+
+ Args:
+ issue_count: number of issues to return
+
+ Returns:
+ The status of this request, with a list of issues when successful.
+ """
+ url = f"{GITHUB_BASE_URL}/search/issues"
+ query = f"repo:{OWNER}/{REPO} is:open is:issue"
+ params = {
+ "q": query,
+ "sort": "created",
+ "order": "desc",
+ "per_page": issue_count,
+ "page": 1,
+ }
+
+ try:
+ response = get_request(url, params)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ issues = response.get("items", None)
+ return {"status": "success", "issues": issues}
+
+
+def get_issue(issue_number: int) -> dict[str, Any]:
+ """Get the details of the specified issue number.
+
+ Args:
+ issue_number: issue number of the Github issue.
+
+ Returns:
+ The status of this request, with the issue details when successful.
+ """
+ url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}"
+ try:
+ response = get_request(url)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ return {"status": "success", "issue": response}
+
+
+def add_comment_to_issue(issue_number: int, comment: str) -> dict[str, Any]:
+ """Add the specified comment to the given issue number.
+
+ Args:
+ issue_number: issue number of the Github issue
+ comment: comment to add
+
+ Returns:
+ The status of this request, with the applied comment when successful.
+ """
+ print(f"Attempting to add comment '{comment}' to issue #{issue_number}")
+ url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}/comments"
+ payload = {"body": comment}
+
+ try:
+ response = post_request(url, payload)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ return {
+ "status": "success",
+ "added_comment": response,
+ }
+
+
+def list_comments_on_issue(issue_number: int) -> dict[str, Any]:
+ """List all comments on the given issue number.
+
+ Args:
+ issue_number: issue number of the Github issue
+
+ Returns:
+ The status of this request, with the list of comments when successful.
+ """
+ print(f"Attempting to list comments on issue #{issue_number}")
+ url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}/comments"
+
+ try:
+ response = get_request(url)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ return {"status": "success", "comments": response}
+
+
+root_agent = Agent(
+ model="gemini-2.5-pro",
+ name="adk_issue_formatting_assistant",
+ description="Check ADK issue format and content.",
+ instruction=f"""
+ # 1. IDENTITY
+ You are an AI assistant designed to help maintain the quality and consistency of issues in our GitHub repository.
+ Your primary role is to act as a "GitHub Issue Format Validator." You will analyze new and existing **open** issues
+ to ensure they contain all the necessary information as required by our templates. You are helpful, polite,
+ and precise in your feedback.
+
+ # 2. CONTEXT & RESOURCES
+ * **Repository:** You are operating on the GitHub repository `{OWNER}/{REPO}`.
+ * **Bug Report Template:** (`{BUG_REPORT_TEMPLATE}`)
+ * **Feature Request Template:** (`{FEATURE_REQUEST_TEMPLATE}`)
+
+ # 3. CORE MISSION
+ Your goal is to check if a GitHub issue, identified as either a "bug" or a "feature request,"
+ contains all the information required by the corresponding template. If it does not, your job is
+ to post a single, helpful comment asking the original author to provide the missing information.
+ {APPROVAL_INSTRUCTION}
+
+ **IMPORTANT NOTE:**
+ * You add one comment at most each time you are invoked.
+ * Don't proceed to other issues which are not the target issues.
+ * Don't take any action on closed issues.
+
+ # 4. BEHAVIORAL RULES & LOGIC
+
+ ## Step 1: Identify Issue Type & Applicability
+
+ Your first task is to determine if the issue is a valid target for validation.
+
+ 1. **Assess Content Intent:** You must perform a quick semantic check of the issue's title, body, and comments.
+ If you determine the issue's content is fundamentally *not* a bug report or a feature request
+ (for example, it is a general question, a request for help, or a discussion prompt), then you must ignore it.
+ 2. **Exit Condition:** If the issue does not clearly fall into the categories of "bug" or "feature request"
+ based on both its labels and its content, **take no action**.
+
+ ## Step 2: Analyze the Issue Content
+
+ If you have determined the issue is a valid bug or feature request, your analysis depends on whether it has comments.
+
+ **Scenario A: Issue has NO comments**
+ 1. Read the main body of the issue.
+ 2. Compare the content of the issue body against the required headings/sections in the relevant template (Bug or Feature).
+ 3. Check for the presence of content under each heading. A heading with no content below it is considered incomplete.
+ 4. If one or more sections are missing or empty, proceed to Step 3.
+ 5. If all sections are filled out, your task is complete. Do nothing.
+
+ **Scenario B: Issue HAS one or more comments**
+ 1. First, analyze the main issue body to see which sections of the template are filled out.
+ 2. Next, read through **all** the comments in chronological order.
+ 3. As you read the comments, check if the information provided in them satisfies any of the template sections that were missing from the original issue body.
+ 4. After analyzing the body and all comments, determine if any required sections from the template *still* remain unaddressed.
+ 5. If one or more sections are still missing information, proceed to Step 3.
+ 6. If the issue body and comments *collectively* provide all the required information, your task is complete. Do nothing.
+
+ ## Step 3: Formulate and Post a Comment (If Necessary)
+
+ If you determined in Step 2 that information is missing, you must post a **single comment** on the issue.
+
+ Please include a bolded note in your comment that this comment was added by an ADK agent.
+
+ **Comment Guidelines:**
+ * **Be Polite and Helpful:** Start with a friendly tone.
+ * **Be Specific:** Clearly list only the sections from the template that are still missing. Do not list sections that have already been filled out.
+ * **Address the Author:** Mention the issue author by their username (e.g., `@username`).
+ * **Provide Context:** Explain *why* the information is needed (e.g., "to help us reproduce the bug" or "to better understand your request").
+ * **Do not be repetitive:** If you have already commented on an issue asking for information, do not comment again unless new information has been added and it's still incomplete.
+
+ **Example Comment for a Bug Report:**
+ > **Response from ADK Agent**
+ >
+ > Hello @[issue-author-username], thank you for submitting this issue!
+ >
+ > To help us investigate and resolve this bug effectively, could you please provide the missing details for the following sections of our bug report template:
+ >
+ > * **To Reproduce:** (Please provide the specific steps required to reproduce the behavior)
+ > * **Desktop (please complete the following information):** (Please provide OS, Python version, and ADK version)
+ >
+ > This information will give us the context we need to move forward. Thanks!
+
+ **Example Comment for a Feature Request:**
+ > **Response from ADK Agent**
+ >
+ > Hi @[issue-author-username], thanks for this great suggestion!
+ >
+ > To help our team better understand and evaluate your feature request, could you please provide a bit more information on the following section:
+ >
+ > * **Is your feature request related to a problem? Please describe.**
+ >
+ > We look forward to hearing more about your idea!
+
+ # 5. FINAL INSTRUCTION
+
+ Execute this process for the given GitHub issue. Your final output should either be **[NO ACTION]**
+ if the issue is complete or invalid, or **[POST COMMENT]** followed by the exact text of the comment you will post.
+
+ Please include your justification for your decision in your output.
+ """,
+ tools=[
+ list_open_issues,
+ get_issue,
+ add_comment_to_issue,
+ list_comments_on_issue,
+ ],
+)
diff --git a/contributing/samples/adk_issue_formatting_agent/settings.py b/contributing/samples/adk_issue_formatting_agent/settings.py
new file mode 100644
index 000000000..d29bda9b7
--- /dev/null
+++ b/contributing/samples/adk_issue_formatting_agent/settings.py
@@ -0,0 +1,33 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from dotenv import load_dotenv
+
+load_dotenv(override=True)
+
+GITHUB_BASE_URL = "https://api.github.com"
+
+GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
+if not GITHUB_TOKEN:
+ raise ValueError("GITHUB_TOKEN environment variable not set")
+
+OWNER = os.getenv("OWNER", "google")
+REPO = os.getenv("REPO", "adk-python")
+EVENT_NAME = os.getenv("EVENT_NAME")
+ISSUE_NUMBER = os.getenv("ISSUE_NUMBER")
+ISSUE_COUNT_TO_PROCESS = os.getenv("ISSUE_COUNT_TO_PROCESS")
+
+IS_INTERACTIVE = os.environ.get("INTERACTIVE", "1").lower() in ["true", "1"]
diff --git a/contributing/samples/adk_issue_formatting_agent/utils.py b/contributing/samples/adk_issue_formatting_agent/utils.py
new file mode 100644
index 000000000..2ee735d3d
--- /dev/null
+++ b/contributing/samples/adk_issue_formatting_agent/utils.py
@@ -0,0 +1,53 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any
+
+from adk_issue_formatting_agent.settings import GITHUB_TOKEN
+import requests
+
+headers = {
+ "Authorization": f"token {GITHUB_TOKEN}",
+ "Accept": "application/vnd.github.v3+json",
+}
+
+
+def get_request(
+ url: str, params: dict[str, Any] | None = None
+) -> dict[str, Any]:
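+ """Send an authenticated GET request to the GitHub API and return the parsed JSON."""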
+ if params is None:
+ params = {}
+ response = requests.get(url, headers=headers, params=params, timeout=60)
+ response.raise_for_status()
+ return response.json()
+
+
+def post_request(url: str, payload: Any) -> dict[str, Any]:
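+ """Send an authenticated POST request with a JSON payload and return the parsed JSON."""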
+ response = requests.post(url, headers=headers, json=payload, timeout=60)
+ response.raise_for_status()
+ return response.json()
+
+
+def error_response(error_message: str) -> dict[str, Any]:
+ return {"status": "error", "message": error_message}
+
+
+def read_file(file_path: str) -> str:
+ """Read the content of the given file."""
+ try:
+ with open(file_path, "r") as f:
+ return f.read()
+ except FileNotFoundError:
+ print(f"Error: File not found: {file_path}.")
+ return ""
diff --git a/contributing/samples/adk_triaging_agent/agent.py b/contributing/samples/adk_triaging_agent/agent.py
index ecf574572..dcd9b9580 100644
--- a/contributing/samples/adk_triaging_agent/agent.py
+++ b/contributing/samples/adk_triaging_agent/agent.py
@@ -12,26 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
-
+from typing import Any
+
+from adk_triaging_agent.settings import BOT_LABEL
+from adk_triaging_agent.settings import GITHUB_BASE_URL
+from adk_triaging_agent.settings import IS_INTERACTIVE
+from adk_triaging_agent.settings import OWNER
+from adk_triaging_agent.settings import REPO
+from adk_triaging_agent.utils import error_response
+from adk_triaging_agent.utils import get_request
+from adk_triaging_agent.utils import post_request
from google.adk import Agent
import requests
-GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
-if not GITHUB_TOKEN:
- raise ValueError("GITHUB_TOKEN environment variable not set")
-
-OWNER = os.getenv("OWNER", "google")
-REPO = os.getenv("REPO", "adk-python")
-BOT_LABEL = os.getenv("BOT_LABEL", "bot_triaged")
-
-BASE_URL = "https://api.github.com"
-
-headers = {
- "Authorization": f"token {GITHUB_TOKEN}",
- "Accept": "application/vnd.github.v3+json",
-}
-
ALLOWED_LABELS = [
"documentation",
"services",
@@ -45,24 +38,25 @@
"web",
]
-
-def is_interactive():
- return os.environ.get("INTERACTIVE", "1").lower() in ["true", "1"]
+APPROVAL_INSTRUCTION = (
+ "Do not ask for user approval for labeling! If you can't find appropriate"
+ " labels for the issue, do not label it."
+)
+if IS_INTERACTIVE:
+ APPROVAL_INSTRUCTION = "Only label them when the user approves the labeling!"
-def list_issues(issue_count: int):
- """
- Generator to list all issues for the repository by handling pagination.
+def list_unlabeled_issues(issue_count: int) -> dict[str, Any]:
+ """List most recent `issue_count` numer of unlabeled issues in the repo.
Args:
issue_count: number of issues to return
+ Returns:
+ The status of this request, with a list of issues when successful.
"""
+ url = f"{GITHUB_BASE_URL}/search/issues"
query = f"repo:{OWNER}/{REPO} is:open is:issue no:label"
-
- unlabelled_issues = []
- url = f"{BASE_URL}/search/issues"
-
params = {
"q": query,
"sort": "created",
@@ -70,57 +64,57 @@ def list_issues(issue_count: int):
"per_page": issue_count,
"page": 1,
}
- response = requests.get(url, headers=headers, params=params, timeout=60)
- response.raise_for_status()
- json_response = response.json()
- issues = json_response.get("items", None)
- if not issues:
- return []
+
+ try:
+ response = get_request(url, params)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ issues = response.get("items", None)
+
+ unlabeled_issues = []
for issue in issues:
- if not issue.get("labels", None) or len(issue["labels"]) == 0:
- unlabelled_issues.append(issue)
- return unlabelled_issues
+ if not issue.get("labels", None):
+ unlabeled_issues.append(issue)
+ return {"status": "success", "issues": unlabeled_issues}
-def add_label_to_issue(issue_number: str, label: str):
- """
- Add the specified label to the given issue number.
+def add_label_to_issue(issue_number: int, label: str) -> dict[str, Any]:
+ """Add the specified label to the given issue number.
Args:
- issue_number: issue number of the Github issue, in string foramt.
+ issue_number: issue number of the Github issue.
label: label to assign
+
+ Returns:
+ The status of this request, with the applied label when successful.
"""
print(f"Attempting to add label '{label}' to issue #{issue_number}")
if label not in ALLOWED_LABELS:
- error_message = (
+ return error_response(
f"Error: Label '{label}' is not an allowed label. Will not apply."
)
- print(error_message)
- return {"status": "error", "message": error_message, "applied_label": None}
- url = f"{BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}/labels"
+ url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}/labels"
payload = [label, BOT_LABEL]
- response = requests.post(url, headers=headers, json=payload, timeout=60)
- response.raise_for_status()
- return response.json()
+ try:
+ response = post_request(url, payload)
+ except requests.exceptions.RequestException as e:
+ return error_response(f"Error: {e}")
+ return {
+ "status": "success",
+ "message": response,
+ "applied_label": label,
+ }
-approval_instruction = (
- "Only label them when the user approves the labeling!"
- if is_interactive()
- else (
- "Do not ask for user approval for labeling! If you can't find a"
- " appropriate labels for the issue, do not label it."
- )
-)
root_agent = Agent(
- model="gemini-2.5-pro-preview-05-06",
+ model="gemini-2.5-pro",
name="adk_triaging_assistant",
description="Triage ADK issues.",
instruction=f"""
- You are a Github adk-python repo triaging bot. You will help get issues, and recommend a label.
- IMPORTANT: {approval_instruction}
+ You are a triaging bot for the Github {REPO} repo with the owner {OWNER}. You will help fetch issues and recommend labels.
+ IMPORTANT: {APPROVAL_INSTRUCTION}
Here are the rules for labeling:
- If the user is asking about documentation-related questions, label it with "documentation".
- If it's about session, memory services, label it with "services"
@@ -138,8 +132,5 @@ def add_label_to_issue(issue_number: str, label: str):
- the issue summary in a few sentence
- your label recommendation and justification
""",
- tools=[
- list_issues,
- add_label_to_issue,
- ],
+ tools=[list_unlabeled_issues, add_label_to_issue],
)
diff --git a/contributing/samples/adk_triaging_agent/main.py b/contributing/samples/adk_triaging_agent/main.py
index a749b26fc..317f5893e 100644
--- a/contributing/samples/adk_triaging_agent/main.py
+++ b/contributing/samples/adk_triaging_agent/main.py
@@ -13,48 +13,37 @@
# limitations under the License.
import asyncio
-import os
import time
-import agent
-from dotenv import load_dotenv
+from adk_triaging_agent import agent
+from adk_triaging_agent.settings import EVENT_NAME
+from adk_triaging_agent.settings import GITHUB_BASE_URL
+from adk_triaging_agent.settings import ISSUE_BODY
+from adk_triaging_agent.settings import ISSUE_COUNT_TO_PROCESS
+from adk_triaging_agent.settings import ISSUE_NUMBER
+from adk_triaging_agent.settings import ISSUE_TITLE
+from adk_triaging_agent.settings import OWNER
+from adk_triaging_agent.settings import REPO
+from adk_triaging_agent.utils import get_request
+from adk_triaging_agent.utils import parse_number_string
from google.adk.agents.run_config import RunConfig
from google.adk.runners import InMemoryRunner
-from google.adk.sessions import Session
+from google.adk.runners import Runner
from google.genai import types
import requests
-load_dotenv(override=True)
-
-OWNER = os.getenv("OWNER", "google")
-REPO = os.getenv("REPO", "adk-python")
-GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
-BASE_URL = "https://api.github.com"
-headers = {
- "Authorization": f"token {GITHUB_TOKEN}",
- "Accept": "application/vnd.github.v3+json",
-}
-
-if not GITHUB_TOKEN:
- print(
- "Warning: GITHUB_TOKEN environment variable not set. API calls might"
- " fail."
- )
+APP_NAME = "adk_triage_app"
+USER_ID = "adk_triage_user"
async def fetch_specific_issue_details(issue_number: int):
"""Fetches details for a single issue if it's unlabelled."""
- if not GITHUB_TOKEN:
- print("Cannot fetch issue details: GITHUB_TOKEN is not set.")
- return None
-
- url = f"{BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}"
+ url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{issue_number}"
print(f"Fetching details for specific issue: {url}")
+
try:
- response = requests.get(url, headers=headers, timeout=60)
- response.raise_for_status()
- issue_data = response.json()
- if not issue_data.get("labels") or len(issue_data["labels"]) == 0:
+ issue_data = get_request(url)
+ if not issue_data.get("labels", None):
print(f"Issue #{issue_number} is unlabelled. Proceeding.")
return {
"number": issue_data["number"],
@@ -71,94 +60,91 @@ async def fetch_specific_issue_details(issue_number: int):
return None
+async def call_agent_async(
+ runner: Runner, user_id: str, session_id: str, prompt: str
+) -> str:
+ """Call the agent asynchronously with the user's prompt."""
+ content = types.Content(
+ role="user", parts=[types.Part.from_text(text=prompt)]
+ )
+
+ final_response_text = ""
+ async for event in runner.run_async(
+ user_id=user_id,
+ session_id=session_id,
+ new_message=content,
+ run_config=RunConfig(save_input_blobs_as_artifacts=False),
+ ):
+ if (
+ event.content
+ and event.content.parts
+ and hasattr(event.content.parts[0], "text")
+ and event.content.parts[0].text
+ ):
+ print(f"** {event.author} (ADK): {event.content.parts[0].text}")
+ if event.author == agent.root_agent.name:
+ final_response_text += event.content.parts[0].text
+
+ return final_response_text
+
+
async def main():
- app_name = "triage_app"
- user_id_1 = "triage_user"
runner = InMemoryRunner(
agent=agent.root_agent,
- app_name=app_name,
+ app_name=APP_NAME,
)
- session_11 = await runner.session_service.create_session(
- app_name=app_name, user_id=user_id_1
+ session = await runner.session_service.create_session(
+ user_id=USER_ID,
+ app_name=APP_NAME,
)
- async def run_agent_prompt(session: Session, prompt_text: str):
- content = types.Content(
- role="user", parts=[types.Part.from_text(text=prompt_text)]
+ if EVENT_NAME == "issues" and ISSUE_NUMBER:
+ print(f"EVENT: Processing specific issue due to '{EVENT_NAME}' event.")
+ issue_number = parse_number_string(ISSUE_NUMBER)
+ if not issue_number:
+ print(f"Error: Invalid issue number received: {ISSUE_NUMBER}.")
+ return
+
+ specific_issue = await fetch_specific_issue_details(issue_number)
+ if specific_issue is None:
+ print(
+ f"No unlabelled issue details found for #{issue_number} or an error"
+ " occurred. Skipping agent interaction."
+ )
+ return
+
+ issue_title = ISSUE_TITLE or specific_issue["title"]
+ issue_body = ISSUE_BODY or specific_issue["body"]
+ prompt = (
+ f"A new GitHub issue #{issue_number} has been opened or"
+ f' reopened. Title: "{issue_title}"\nBody:'
+ f' "{issue_body}"\n\nBased on the rules, recommend an'
+ " appropriate label and its justification."
+ " Then, use the 'add_label_to_issue' tool to apply the label "
+ "directly to this issue. Only label it, do not"
+ " process any other issues."
)
- print(f"\n>>>> Agent Prompt: {prompt_text}")
- final_agent_response_parts = []
- async for event in runner.run_async(
- user_id=user_id_1,
- session_id=session.id,
- new_message=content,
- run_config=RunConfig(save_input_blobs_as_artifacts=False),
- ):
- if event.content.parts and event.content.parts[0].text:
- print(f"** {event.author} (ADK): {event.content.parts[0].text}")
- if event.author == agent.root_agent.name:
- final_agent_response_parts.append(event.content.parts[0].text)
- print(f"<<<< Agent Final Output: {''.join(final_agent_response_parts)}\n")
-
- event_name = os.getenv("EVENT_NAME")
- issue_number_str = os.getenv("ISSUE_NUMBER")
-
- if event_name == "issues" and issue_number_str:
- print(f"EVENT: Processing specific issue due to '{event_name}' event.")
- try:
- issue_number = int(issue_number_str)
- specific_issue = await fetch_specific_issue_details(issue_number)
-
- if specific_issue:
- prompt = (
- f"A new GitHub issue #{specific_issue['number']} has been opened or"
- f" reopened. Title: \"{specific_issue['title']}\"\nBody:"
- f" \"{specific_issue['body']}\"\n\nBased on the rules, recommend an"
- " appropriate label and its justification."
- " Then, use the 'add_label_to_issue' tool to apply the label "
- "directly to this issue."
- f" The issue number is {specific_issue['number']}."
- )
- await run_agent_prompt(session_11, prompt)
- else:
- print(
- f"No unlabelled issue details found for #{issue_number} or an error"
- " occurred. Skipping agent interaction."
- )
-
- except ValueError:
- print(f"Error: Invalid ISSUE_NUMBER received: {issue_number_str}")
-
else:
- print(f"EVENT: Processing batch of issues (event: {event_name}).")
- issue_count_str = os.getenv("ISSUE_COUNT_TO_PROCESS", "3")
- try:
- num_issues_to_process = int(issue_count_str)
- except ValueError:
- print(f"Warning: Invalid ISSUE_COUNT_TO_PROCESS. Defaulting to 3.")
- num_issues_to_process = 3
+ print(f"EVENT: Processing batch of issues (event: {EVENT_NAME}).")
+ issue_count = parse_number_string(ISSUE_COUNT_TO_PROCESS, default_value=3)
+ prompt = f"Please triage the most recent {issue_count} issues."
- prompt = (
- f"List the first {num_issues_to_process} unlabelled open issues from"
- f" the {OWNER}/{REPO} repository. For each issue, provide a summary,"
- " recommend a label with justification, and then use the"
- " 'add_label_to_issue' tool to apply the recommended label directly."
- )
- await run_agent_prompt(session_11, prompt)
+ response = await call_agent_async(runner, USER_ID, session.id, prompt)
+ print(f"<<<< Agent Final Output: {response}\n")
if __name__ == "__main__":
start_time = time.time()
print(
- "Script start time:",
- time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(start_time)),
+ f"Start triaging {OWNER}/{REPO} issues at"
+ f" {time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(start_time))}"
)
- print("------------------------------------")
+ print("-" * 80)
asyncio.run(main())
+ print("-" * 80)
end_time = time.time()
- print("------------------------------------")
print(
- "Script end time:",
- time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(end_time)),
+ "Triaging finished at"
+ f" {time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(end_time))}",
)
print("Total script execution time:", f"{end_time - start_time:.2f} seconds")
diff --git a/contributing/samples/adk_triaging_agent/settings.py b/contributing/samples/adk_triaging_agent/settings.py
new file mode 100644
index 000000000..5fc1a9073
--- /dev/null
+++ b/contributing/samples/adk_triaging_agent/settings.py
@@ -0,0 +1,36 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from dotenv import load_dotenv
+
+load_dotenv(override=True)
+
+GITHUB_BASE_URL = "https://api.github.com"
+
+GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
+if not GITHUB_TOKEN:
+ raise ValueError("GITHUB_TOKEN environment variable not set")
+
+OWNER = os.getenv("OWNER", "google")
+REPO = os.getenv("REPO", "adk-python")
+BOT_LABEL = os.getenv("BOT_LABEL", "bot_triaged")
+EVENT_NAME = os.getenv("EVENT_NAME")
+ISSUE_NUMBER = os.getenv("ISSUE_NUMBER")
+ISSUE_TITLE = os.getenv("ISSUE_TITLE")
+ISSUE_BODY = os.getenv("ISSUE_BODY")
+ISSUE_COUNT_TO_PROCESS = os.getenv("ISSUE_COUNT_TO_PROCESS")
+
+IS_INTERACTIVE = os.environ.get("INTERACTIVE", "1").lower() in ["true", "1"]
diff --git a/contributing/samples/adk_triaging_agent/utils.py b/contributing/samples/adk_triaging_agent/utils.py
new file mode 100644
index 000000000..80c32c010
--- /dev/null
+++ b/contributing/samples/adk_triaging_agent/utils.py
@@ -0,0 +1,55 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any
+
+from adk_triaging_agent.settings import GITHUB_TOKEN
+import requests
+
+headers = {
+ "Authorization": f"token {GITHUB_TOKEN}",
+ "Accept": "application/vnd.github.v3+json",
+}
+
+
+def get_request(
+ url: str, params: dict[str, Any] | None = None
+) -> dict[str, Any]:
+ if params is None:
+ params = {}
+ response = requests.get(url, headers=headers, params=params, timeout=60)
+ response.raise_for_status()
+ return response.json()
+
+
+def post_request(url: str, payload: Any) -> dict[str, Any]:
+ response = requests.post(url, headers=headers, json=payload, timeout=60)
+ response.raise_for_status()
+ return response.json()
+
+
+def error_response(error_message: str) -> dict[str, Any]:
+ return {"status": "error", "message": error_message}
+
+
+def parse_number_string(number_str: str, default_value: int = 0) -> int:
+ """Parse a number from the given string."""
+ try:
+ return int(number_str)
+ except ValueError:
+ print(
+ f"Warning: Invalid number string: {number_str}. Defaulting to"
+ f" {default_value}."
+ )
+ return default_value
diff --git a/contributing/samples/artifact_save_text/agent.py b/contributing/samples/artifact_save_text/agent.py
index 53a7f300d..3ce43bcd1 100755
--- a/contributing/samples/artifact_save_text/agent.py
+++ b/contributing/samples/artifact_save_text/agent.py
@@ -31,7 +31,7 @@ async def log_query(tool_context: ToolContext, query: str):
model='gemini-2.0-flash',
name='log_agent',
description='Log user query.',
- instruction="""Always log the user query and reploy "kk, I've logged."
+ instruction="""Always log the user query and reply "kk, I've logged."
""",
tools=[log_query],
generate_content_config=types.GenerateContentConfig(
diff --git a/contributing/samples/bigquery/README.md b/contributing/samples/bigquery/README.md
index cd4583c72..050ce1332 100644
--- a/contributing/samples/bigquery/README.md
+++ b/contributing/samples/bigquery/README.md
@@ -40,13 +40,28 @@ would set:
### With Application Default Credentials
This mode is useful for quick development when the agent builder is the only
-user interacting with the agent. The tools are initialized with the default
-credentials present on the machine running the agent.
+user interacting with the agent. The tools are run with these credentials.
1. Create application default credentials on the machine where the agent would
be running by following https://cloud.google.com/docs/authentication/provide-credentials-adc.
-1. Set `RUN_WITH_ADC=True` in `agent.py` and run the agent
+1. Set `CREDENTIALS_TYPE=None` in `agent.py`
+
+1. Run the agent
+
+### With Service Account Keys
+
+This mode is useful for quick development when the agent builder wants to run
+the agent with service account credentials. The tools are run with these
+credentials.
+
+1. Create a service account key by following https://cloud.google.com/iam/docs/service-account-creds#user-managed-keys.
+
+1. Set `CREDENTIALS_TYPE=AuthCredentialTypes.SERVICE_ACCOUNT` in `agent.py`
+
+1. Download the key file and replace `"service_account_key.json"` with the path
+
+1. Run the agent
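+
+For reference, here is a minimal sketch of the relevant `agent.py` lines for this
+mode (replace the key file path with your own):
+
+```py
+CREDENTIALS_TYPE = AuthCredentialTypes.SERVICE_ACCOUNT
+creds, _ = google.auth.load_credentials_from_file("service_account_key.json")
+credentials_config = BigQueryCredentialsConfig(credentials=creds)
+```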
### With Interactive OAuth
@@ -72,7 +87,7 @@ type.
Note: don't create a separate .env, instead put it to the same .env file that
stores your Vertex AI or Dev ML credentials
-1. Set `RUN_WITH_ADC=False` in `agent.py` and run the agent
+1. Set `CREDENTIALS_TYPE=AuthCredentialTypes.OAUTH2` in `agent.py` and run the agent
## Sample prompts
diff --git a/contributing/samples/bigquery/agent.py b/contributing/samples/bigquery/agent.py
index 0999ca12a..b78f79685 100644
--- a/contributing/samples/bigquery/agent.py
+++ b/contributing/samples/bigquery/agent.py
@@ -15,24 +15,25 @@
import os
from google.adk.agents import llm_agent
+from google.adk.auth import AuthCredentialTypes
from google.adk.tools.bigquery import BigQueryCredentialsConfig
from google.adk.tools.bigquery import BigQueryToolset
from google.adk.tools.bigquery.config import BigQueryToolConfig
from google.adk.tools.bigquery.config import WriteMode
import google.auth
-RUN_WITH_ADC = False
+# Define an appropriate credential type
+CREDENTIALS_TYPE = AuthCredentialTypes.OAUTH2
+# Define BigQuery tool config with write mode set to allowed. Note that this is
+# only to demonstrate the full capability of the BigQuery tools. In production
+# you may want to change to BLOCKED (default write mode, effectively makes the
+# tool read-only) or PROTECTED (only allows writes in the anonymous dataset of a
+# BigQuery session) write mode.
tool_config = BigQueryToolConfig(write_mode=WriteMode.ALLOWED)
-if RUN_WITH_ADC:
- # Initialize the tools to use the application default credentials.
- application_default_credentials, _ = google.auth.default()
- credentials_config = BigQueryCredentialsConfig(
- credentials=application_default_credentials
- )
-else:
+if CREDENTIALS_TYPE == AuthCredentialTypes.OAUTH2:
 # Initialize the tools to do interactive OAuth
# The environment variables OAUTH_CLIENT_ID and OAUTH_CLIENT_SECRET
# must be set
@@ -40,6 +41,20 @@
client_id=os.getenv("OAUTH_CLIENT_ID"),
client_secret=os.getenv("OAUTH_CLIENT_SECRET"),
)
+elif CREDENTIALS_TYPE == AuthCredentialTypes.SERVICE_ACCOUNT:
+ # Initialize the tools to use the credentials in the service account key.
+ # If this flow is enabled, make sure to replace the file path with your own
+ # service account key file
+ # https://cloud.google.com/iam/docs/service-account-creds#user-managed-keys
+ creds, _ = google.auth.load_credentials_from_file("service_account_key.json")
+ credentials_config = BigQueryCredentialsConfig(credentials=creds)
+else:
+ # Initialize the tools to use the application default credentials.
+ # https://cloud.google.com/docs/authentication/provide-credentials-adc
+ application_default_credentials, _ = google.auth.default()
+ credentials_config = BigQueryCredentialsConfig(
+ credentials=application_default_credentials
+ )
bigquery_toolset = BigQueryToolset(
credentials_config=credentials_config, bigquery_tool_config=tool_config
@@ -49,7 +64,7 @@
# debug CLI
root_agent = llm_agent.Agent(
model="gemini-2.0-flash",
- name="hello_agent",
+ name="bigquery_agent",
description=(
"Agent to answer questions about BigQuery data and models and execute"
" SQL queries."
diff --git a/contributing/samples/bigquery_agent/README.md b/contributing/samples/google_api/README.md
similarity index 50%
rename from contributing/samples/bigquery_agent/README.md
rename to contributing/samples/google_api/README.md
index c7dc7fd8b..c1e6e8d4c 100644
--- a/contributing/samples/bigquery_agent/README.md
+++ b/contributing/samples/google_api/README.md
@@ -1,45 +1,40 @@
-# BigQuery Sample
+# Google API Tools Sample
## Introduction
-This sample tests and demos the BigQuery support in ADK via two tools:
+This sample tests and demos Google API tools available in the
+`google.adk.tools.google_api_tool` module. We pick the following BigQuery API
+tools for this sample agent:
-* 1. bigquery_datasets_list:
+1. `bigquery_datasets_list`: List user's datasets.
- List user's datasets.
+2. `bigquery_datasets_get`: Get a dataset's details.
-* 2. bigquery_datasets_get:
- Get a dataset's details.
+3. `bigquery_datasets_insert`: Create a new dataset.
-* 3. bigquery_datasets_insert:
- Create a new dataset.
+4. `bigquery_tables_list`: List all tables in a dataset.
-* 4. bigquery_tables_list:
- List all tables in a dataset.
+5. `bigquery_tables_get`: Get a table's details.
-* 5. bigquery_tables_get:
- Get a table's details.
-
-* 6. bigquery_tables_insert:
- Insert a new table into a dataset.
+6. `bigquery_tables_insert`: Insert a new table into a dataset.
## How to use
-* 1. Follow https://developers.google.com/identity/protocols/oauth2#1.-obtain-oauth-2.0-credentials-from-the-dynamic_data.setvar.console_name. to get your client id and client secret.
+1. Follow https://developers.google.com/identity/protocols/oauth2#1.-obtain-oauth-2.0-credentials-from-the-dynamic_data.setvar.console_name. to get your client id and client secret.
Be sure to choose "web" as your client type.
-* 2. Configure your `.env` file to add two variables:
+2. Configure your `.env` file to add two variables:
* OAUTH_CLIENT_ID={your client id}
* OAUTH_CLIENT_SECRET={your client secret}
Note: don't create a separate `.env` file , instead put it to the same `.env` file that stores your Vertex AI or Dev ML credentials
-* 3. Follow https://developers.google.com/identity/protocols/oauth2/web-server#creatingcred to add http://localhost/dev-ui/ to "Authorized redirect URIs".
+3. Follow https://developers.google.com/identity/protocols/oauth2/web-server#creatingcred to add http://localhost/dev-ui/ to "Authorized redirect URIs".
Note: localhost here is just a hostname that you use to access the dev ui, replace it with the actual hostname you use to access the dev ui.
-* 4. For 1st run, allow popup for localhost in Chrome.
+4. For 1st run, allow popup for localhost in Chrome.
## Sample prompt
diff --git a/contributing/samples/google_api/__init__.py b/contributing/samples/google_api/__init__.py
new file mode 100644
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/google_api/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/google_api/agent.py b/contributing/samples/google_api/agent.py
new file mode 100644
index 000000000..1cdbab9c6
--- /dev/null
+++ b/contributing/samples/google_api/agent.py
@@ -0,0 +1,78 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from dotenv import load_dotenv
+from google.adk import Agent
+from google.adk.tools.google_api_tool import BigQueryToolset
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Access the variable
+oauth_client_id = os.getenv("OAUTH_CLIENT_ID")
+oauth_client_secret = os.getenv("OAUTH_CLIENT_SECRET")
+tools_to_expose = [
+ "bigquery_datasets_list",
+ "bigquery_datasets_get",
+ "bigquery_datasets_insert",
+ "bigquery_tables_list",
+ "bigquery_tables_get",
+ "bigquery_tables_insert",
+]
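+# Expose only the selected BigQuery tools to the agent via the tool filter.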
+bigquery_toolset = BigQueryToolset(
+ client_id=oauth_client_id,
+ client_secret=oauth_client_secret,
+ tool_filter=tools_to_expose,
+)
+
+root_agent = Agent(
+ model="gemini-2.0-flash",
+ name="google_api_bigquery_agent",
+ instruction="""
+ You are a helpful Google BigQuery agent that helps manage users' data on Google BigQuery.
+ Use the provided tools to conduct various operations on users' data in Google BigQuery.
+
+ Scenario 1:
+ The user wants to query their bigquery datasets
+ Use bigquery_datasets_list to query user's datasets
+
+ Scenario 2:
+ The user wants to query the details of a specific dataset
+ Use bigquery_datasets_get to get a dataset's details
+
+ Scenario 3:
+ The user wants to create a new dataset
+ Use bigquery_datasets_insert to create a new dataset
+
+ Scenario 4:
+ The user wants to query their tables in a specific dataset
+ Use bigquery_tables_list to list all tables in a dataset
+
+ Scenario 5:
+ The user wants to query the details of a specific table
+ Use bigquery_tables_get to get a table's details
+
+ Scenario 6:
+ The user wants to insert a new table into a dataset
+ Use bigquery_tables_insert to insert a new table into a dataset
+
+ Current user:
+
+ {userInfo?}
+
+""",
+ tools=[bigquery_toolset],
+)
diff --git a/contributing/samples/hello_world_litellm_add_function_to_prompt/__init__.py b/contributing/samples/hello_world_litellm_add_function_to_prompt/__init__.py
new file mode 100644
index 000000000..7d5bb0b1c
--- /dev/null
+++ b/contributing/samples/hello_world_litellm_add_function_to_prompt/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from . import agent
diff --git a/contributing/samples/hello_world_litellm_add_function_to_prompt/agent.py b/contributing/samples/hello_world_litellm_add_function_to_prompt/agent.py
new file mode 100644
index 000000000..0f10621ae
--- /dev/null
+++ b/contributing/samples/hello_world_litellm_add_function_to_prompt/agent.py
@@ -0,0 +1,78 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import random
+
+from google.adk import Agent
+from google.adk.models.lite_llm import LiteLlm
+from langchain_core.utils.function_calling import convert_to_openai_function
+
+
+def roll_die(sides: int) -> int:
+ """Roll a die and return the rolled result.
+
+ Args:
+ sides: The integer number of sides the die has.
+
+ Returns:
+ An integer of the result of rolling the die.
+ """
+ return random.randint(1, sides)
+
+
+def check_prime(number: int) -> str:
+ """Check if a given number is prime.
+
+ Args:
+ number: The input number to check.
+
+ Returns:
+ A str indicating the number is prime or not.
+ """
+ if number <= 1:
+ return f"{number} is not prime."
+ is_prime = True
+ for i in range(2, int(number**0.5) + 1):
+ if number % i == 0:
+ is_prime = False
+ break
+ if is_prime:
+ return f"{number} is prime."
+ else:
+ return f"{number} is not prime."
+
+
+root_agent = Agent(
+ model=LiteLlm(
+ model="vertex_ai/meta/llama-4-maverick-17b-128e-instruct-maas",
+ # If the model is not trained with functions and you would like to
+ # enable function calling, you can add functions to the models, and the
+ # functions will be added to the prompts during inferences.
+ functions=[
+ convert_to_openai_function(roll_die),
+ convert_to_openai_function(check_prime),
+ ],
+ ),
+ name="data_processing_agent",
+ description="""You are a helpful assistant.""",
+ instruction="""
+ You are a helpful assistant that can optionally call tools.
+ If you call tools, format the tool call as JSON and parse the tool arguments from the user's inputs.
+ """,
+ tools=[
+ roll_die,
+ check_prime,
+ ],
+)
diff --git a/contributing/samples/hello_world_litellm_add_function_to_prompt/main.py b/contributing/samples/hello_world_litellm_add_function_to_prompt/main.py
new file mode 100644
index 000000000..123ba1368
--- /dev/null
+++ b/contributing/samples/hello_world_litellm_add_function_to_prompt/main.py
@@ -0,0 +1,81 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import asyncio
+import time
+
+import agent
+from dotenv import load_dotenv
+from google.adk import Runner
+from google.adk.artifacts import InMemoryArtifactService
+from google.adk.cli.utils import logs
+from google.adk.sessions import InMemorySessionService
+from google.adk.sessions import Session
+from google.genai import types
+
+load_dotenv(override=True)
+logs.log_to_tmp_folder()
+
+
+async def main():
+ app_name = 'my_app'
+ user_id_1 = 'user1'
+ session_service = InMemorySessionService()
+ artifact_service = InMemoryArtifactService()
+ runner = Runner(
+ app_name=app_name,
+ agent=agent.root_agent,
+ artifact_service=artifact_service,
+ session_service=session_service,
+ )
+ session_11 = await session_service.create_session(
+ app_name=app_name, user_id=user_id_1
+ )
+
+ async def run_prompt(session: Session, new_message: str):
+ content = types.Content(
+ role='user', parts=[types.Part.from_text(text=new_message)]
+ )
+ print('** User says:', content.model_dump(exclude_none=True))
+ async for event in runner.run_async(
+ user_id=user_id_1,
+ session_id=session.id,
+ new_message=content,
+ ):
+ if event.content.parts:
+ part = event.content.parts[0]
+ if part.text:
+ print(f'** {event.author}: {part.text}')
+ if part.function_call:
+ print(f'** {event.author} calls tool: {part.function_call}')
+ if part.function_response:
+ print(
+ f'** {event.author} gets tool response: {part.function_response}'
+ )
+
+ start_time = time.time()
+ print('Start time:', start_time)
+ print('------------------------------------')
+ await run_prompt(session_11, 'Hi, introduce yourself.')
+ await run_prompt(session_11, 'Roll a die with 100 sides.')
+ await run_prompt(session_11, 'Check if it is prime.')
+ end_time = time.time()
+ print('------------------------------------')
+ print('End time:', end_time)
+ print('Total time:', end_time - start_time)
+
+
+if __name__ == '__main__':
+ asyncio.run(main())
diff --git a/contributing/samples/live_bidi_streaming_multi_agent/__init__.py b/contributing/samples/live_bidi_streaming_multi_agent/__init__.py
new file mode 100644
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/live_bidi_streaming_multi_agent/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/live_bidi_streaming_multi_agent/agent.py b/contributing/samples/live_bidi_streaming_multi_agent/agent.py
new file mode 100644
index 000000000..09b08e32e
--- /dev/null
+++ b/contributing/samples/live_bidi_streaming_multi_agent/agent.py
@@ -0,0 +1,129 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+from google.adk.agents import Agent
+from google.adk.examples.example import Example
+from google.adk.tools.example_tool import ExampleTool
+from google.genai import types
+
+
+# --- Roll Die Sub-Agent ---
+def roll_die(sides: int) -> int:
+ """Roll a die and return the rolled result."""
+ return random.randint(1, sides)
+
+
+roll_agent = Agent(
+ name="roll_agent",
+ description="Handles rolling dice of different sizes.",
+ instruction="""
+ You are responsible for rolling dice based on the user's request.
+ When asked to roll a die, you must call the roll_die tool with the number of sides as an integer.
+ """,
+ tools=[roll_die],
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
+
+
+# --- Prime Check Sub-Agent ---
+def check_prime(nums: list[int]) -> str:
+ """Check if a given list of numbers are prime."""
+ primes = set()
+ for number in nums:
+ number = int(number)
+ if number <= 1:
+ continue
+ is_prime = True
+ for i in range(2, int(number**0.5) + 1):
+ if number % i == 0:
+ is_prime = False
+ break
+ if is_prime:
+ primes.add(number)
+ return (
+ "No prime numbers found."
+ if not primes
+ else f"{', '.join(str(num) for num in primes)} are prime numbers."
+ )
+
+
+prime_agent = Agent(
+ name="prime_agent",
+ description="Handles checking if numbers are prime.",
+ instruction="""
+ You are responsible for checking whether numbers are prime.
+ When asked to check primes, you must call the check_prime tool with a list of integers.
+ Never attempt to determine prime numbers manually.
+ Return the prime number results to the root agent.
+ """,
+ tools=[check_prime],
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
+
+
+def get_current_weather(location: str):
+ """
+ Returns the current weather.
+ """
+ if location == "New York":
+ return "Sunny"
+ else:
+ return "Raining"
+
+
+root_agent = Agent(
+ # find supported models here: https://google.github.io/adk-docs/get-started/streaming/quickstart-streaming/
+ # model='gemini-live-2.5-flash-preview-native-audio', # for Vertex project
+ model="gemini-live-2.5-flash-preview", # for AI studio key
+ name="root_agent",
+ instruction="""
+ You are a helpful assistant that can check time, roll dice and check if numbers are prime.
+ You can check time on your own.
+ You delegate rolling dice tasks to the roll_agent and prime checking tasks to the prime_agent.
+ Follow these steps:
+ 1. If the user asks to roll a die, delegate to the roll_agent.
+ 2. If the user asks to check primes, delegate to the prime_agent.
+ 3. If the user asks to roll a die and then check if the result is prime, call roll_agent first, then pass the result to prime_agent.
+ Always clarify the results before proceeding.
+ """,
+ global_instruction=(
+ "You are DicePrimeBot, ready to roll dice and check prime numbers."
+ ),
+ sub_agents=[roll_agent, prime_agent],
+ tools=[get_current_weather],
+ generate_content_config=types.GenerateContentConfig(
+ safety_settings=[
+ types.SafetySetting( # avoid false alarm about rolling dice.
+ category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold=types.HarmBlockThreshold.OFF,
+ ),
+ ]
+ ),
+)
diff --git a/contributing/samples/live_bidi_streaming_multi_agent/readme.md b/contributing/samples/live_bidi_streaming_multi_agent/readme.md
new file mode 100644
index 000000000..27c93b10f
--- /dev/null
+++ b/contributing/samples/live_bidi_streaming_multi_agent/readme.md
@@ -0,0 +1,43 @@
+# Simplistic Live (Bidi-Streaming) Multi-Agent
+This project provides a basic example of a live, bidirectional streaming multi-agent
+designed for testing and experimentation.
+
+You can see full documentation [here](https://google.github.io/adk-docs/streaming/).
+
+## Getting Started
+
+Follow these steps to get the agent up and running:
+
+1. **Start the ADK Web Server**
+ Open your terminal, navigate to the root directory that contains the
+ `live_bidi_streaming_multi_agent` folder, and execute the following command:
+ ```bash
+ adk web
+ ```
+
+2. **Access the ADK Web UI**
+ Once the server is running, open your web browser and navigate to the URL
+ provided in the terminal (it will typically be `http://localhost:8000`).
+
+3. **Select the Agent**
+ In the top-left corner of the ADK Web UI, use the dropdown menu to select
+ this agent.
+
+4. **Start Streaming**
+ Click on either the **Audio** or **Video** icon located near the chat input
+ box to begin the streaming session.
+
+5. **Interact with the Agent**
+ You can now begin talking to the agent, and it will respond in real-time.
+
+## Usage Notes
+
+* You only need to click the **Audio** or **Video** button once to initiate the
+ stream. The current version does not support stopping and restarting the stream
+ by clicking the button again during a session.
+
+## Sample Queries
+
+- Hello, what's the weather in Seattle and New York?
+- Could you roll a 6-sided die for me?
+- Could you check if the number you rolled is a prime number or not?
diff --git a/contributing/samples/live_bidi_streaming_single_agent/__init__.py b/contributing/samples/live_bidi_streaming_single_agent/__init__.py
new file mode 100755
index 000000000..c48963cdc
--- /dev/null
+++ b/contributing/samples/live_bidi_streaming_single_agent/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import agent
diff --git a/contributing/samples/live_bidi_streaming_agent/agent.py b/contributing/samples/live_bidi_streaming_single_agent/agent.py
similarity index 100%
rename from contributing/samples/live_bidi_streaming_agent/agent.py
rename to contributing/samples/live_bidi_streaming_single_agent/agent.py
diff --git a/contributing/samples/live_bidi_streaming_agent/readme.md b/contributing/samples/live_bidi_streaming_single_agent/readme.md
similarity index 100%
rename from contributing/samples/live_bidi_streaming_agent/readme.md
rename to contributing/samples/live_bidi_streaming_single_agent/readme.md
diff --git a/contributing/samples/mcp_sse_agent/agent.py b/contributing/samples/mcp_sse_agent/agent.py
index 888a88b24..5423bfc6b 100755
--- a/contributing/samples/mcp_sse_agent/agent.py
+++ b/contributing/samples/mcp_sse_agent/agent.py
@@ -16,8 +16,8 @@
import os
from google.adk.agents.llm_agent import LlmAgent
+from google.adk.tools.mcp_tool.mcp_session_manager import SseConnectionParams
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
-from google.adk.tools.mcp_tool.mcp_toolset import SseServerParams
_allowed_path = os.path.dirname(os.path.abspath(__file__))
@@ -31,7 +31,7 @@
""",
tools=[
MCPToolset(
- connection_params=SseServerParams(
+ connection_params=SseConnectionParams(
url='http://localhost:3000/sse',
headers={'Accept': 'text/event-stream'},
),
diff --git a/contributing/samples/oauth_calendar_agent/agent.py b/contributing/samples/oauth_calendar_agent/agent.py
index a1b1dea87..3f966b787 100644
--- a/contributing/samples/oauth_calendar_agent/agent.py
+++ b/contributing/samples/oauth_calendar_agent/agent.py
@@ -13,7 +13,6 @@
# limitations under the License.
from datetime import datetime
-import json
import os
from dotenv import load_dotenv
@@ -27,8 +26,8 @@
from google.adk.auth import AuthCredentialTypes
from google.adk.auth import OAuth2Auth
from google.adk.tools import ToolContext
+from google.adk.tools.authenticated_function_tool import AuthenticatedFunctionTool
from google.adk.tools.google_api_tool import CalendarToolset
-from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
@@ -56,6 +55,7 @@ def list_calendar_events(
end_time: str,
limit: int,
tool_context: ToolContext,
+ credential: AuthCredential,
) -> list[dict]:
"""Search for calendar events.
@@ -80,84 +80,11 @@ def list_calendar_events(
Returns:
list[dict]: A list of events that match the search criteria.
"""
- creds = None
-
- # Check if the tokes were already in the session state, which means the user
- # has already gone through the OAuth flow and successfully authenticated and
- # authorized the tool to access their calendar.
- if "calendar_tool_tokens" in tool_context.state:
- creds = Credentials.from_authorized_user_info(
- tool_context.state["calendar_tool_tokens"], SCOPES
- )
- if not creds or not creds.valid:
- # If the access token is expired, refresh it with the refresh token.
- if creds and creds.expired and creds.refresh_token:
- creds.refresh(Request())
- else:
- auth_scheme = OAuth2(
- flows=OAuthFlows(
- authorizationCode=OAuthFlowAuthorizationCode(
- authorizationUrl="https://accounts.google.com/o/oauth2/auth",
- tokenUrl="https://oauth2.googleapis.com/token",
- scopes={
- "https://www.googleapis.com/auth/calendar": (
- "See, edit, share, and permanently delete all the"
- " calendars you can access using Google Calendar"
- )
- },
- )
- )
- )
- auth_credential = AuthCredential(
- auth_type=AuthCredentialTypes.OAUTH2,
- oauth2=OAuth2Auth(
- client_id=oauth_client_id, client_secret=oauth_client_secret
- ),
- )
- # If the user has not gone through the OAuth flow before, or the refresh
- # token also expired, we need to ask users to go through the OAuth flow.
- # First we check whether the user has just gone through the OAuth flow and
- # Oauth response is just passed back.
- auth_response = tool_context.get_auth_response(
- AuthConfig(
- auth_scheme=auth_scheme, raw_auth_credential=auth_credential
- )
- )
- if auth_response:
- # ADK exchanged the access token already for us
- access_token = auth_response.oauth2.access_token
- refresh_token = auth_response.oauth2.refresh_token
-
- creds = Credentials(
- token=access_token,
- refresh_token=refresh_token,
- token_uri=auth_scheme.flows.authorizationCode.tokenUrl,
- client_id=oauth_client_id,
- client_secret=oauth_client_secret,
- scopes=list(auth_scheme.flows.authorizationCode.scopes.keys()),
- )
- else:
- # If there are no auth response which means the user has not gone
- # through the OAuth flow yet, we need to ask users to go through the
- # OAuth flow.
- tool_context.request_credential(
- AuthConfig(
- auth_scheme=auth_scheme,
- raw_auth_credential=auth_credential,
- )
- )
- # The return value is optional and could be any dict object. It will be
- # wrapped in a dict with key as 'result' and value as the return value
- # if the object returned is not a dict. This response will be passed
- # to LLM to generate a user friendly message. e.g. LLM will tell user:
- # "I need your authorization to access your calendar. Please authorize
- # me so I can check your meetings for today."
- return "Need User Authorization to access their calendar."
- # We store the access token and refresh token in the session state for the
- # next runs. This is just an example. On production, a tool should store
- # those credentials in some secure store or properly encrypt it before store
- # it in the session state.
- tool_context.state["calendar_tool_tokens"] = json.loads(creds.to_json())
+
+ creds = Credentials(
+ token=credential.oauth2.access_token,
+ refresh_token=credential.oauth2.refresh_token,
+ )
service = build("calendar", "v3", credentials=creds)
events_result = (
@@ -208,6 +135,33 @@ def update_time(callback_context: CallbackContext):
Currnet time: {_time}
""",
- tools=[list_calendar_events, calendar_toolset],
+ tools=[
+ AuthenticatedFunctionTool(
+ func=list_calendar_events,
+ auth_config=AuthConfig(
+ auth_scheme=OAuth2(
+ flows=OAuthFlows(
+ authorizationCode=OAuthFlowAuthorizationCode(
+ authorizationUrl=(
+ "https://accounts.google.com/o/oauth2/auth"
+ ),
+ tokenUrl="https://oauth2.googleapis.com/token",
+ scopes={
+ "https://www.googleapis.com/auth/calendar": "",
+ },
+ )
+ )
+ ),
+ raw_auth_credential=AuthCredential(
+ auth_type=AuthCredentialTypes.OAUTH2,
+ oauth2=OAuth2Auth(
+ client_id=oauth_client_id,
+ client_secret=oauth_client_secret,
+ ),
+ ),
+ ),
+ ),
+ calendar_toolset,
+ ],
before_agent_callback=update_time,
)
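The net effect of this change is that the OAuth plumbing moves out of the tool body and into `AuthenticatedFunctionTool`: ADK drives the authorization flow and passes the exchanged credential straight into the tool function. A sketch of the resulting tool shape (illustrative; `list_events` and its parameters are placeholders, and the imports mirror those already used in the file above):

```python
from google.adk.auth import AuthCredential
from google.adk.tools import ToolContext
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build


def list_events(
    start_time: str, tool_context: ToolContext, credential: AuthCredential
) -> list[dict]:
  # The tool only builds its client from the credential ADK passes in; no
  # manual request_credential()/get_auth_response() handling is required.
  creds = Credentials(
      token=credential.oauth2.access_token,
      refresh_token=credential.oauth2.refresh_token,
  )
  service = build("calendar", "v3", credentials=creds)
  result = service.events().list(calendarId="primary", timeMin=start_time).execute()
  return result.get("items", [])
```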
diff --git a/pyproject.toml b/pyproject.toml
index 8ece4db81..a93443d45 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,6 +25,7 @@ classifiers = [ # List of https://pypi.org/classifiers/
]
dependencies = [
# go/keep-sorted start
+ "PyYAML>=6.0.2", # For APIHubToolset.
"anyio>=4.9.0;python_version>='3.10'", # For MCP Session Manager
"authlib>=1.5.1", # For RestAPI Tool
"click>=8.1.8", # For CLI tools
@@ -34,7 +35,7 @@ dependencies = [
"google-cloud-secret-manager>=2.22.0", # Fetching secrets in RestAPI Tool
"google-cloud-speech>=2.30.0", # For Audio Transcription
"google-cloud-storage>=2.18.0, <3.0.0", # For GCS Artifact service
- "google-genai>=1.17.0", # Google GenAI SDK
+ "google-genai>=1.21.1", # Google GenAI SDK
"graphviz>=0.20.2", # Graphviz for graph rendering
"mcp>=1.8.0;python_version>='3.10'", # For MCP Toolset
"opentelemetry-api>=1.31.0", # OpenTelemetry
@@ -43,7 +44,6 @@ dependencies = [
"pydantic>=2.0, <3.0.0", # For data validation/models
"python-dateutil>=2.9.0.post0", # For Vertext AI Session Service
"python-dotenv>=1.0.0", # To manage environment variables
- "PyYAML>=6.0.2", # For APIHubToolset.
"requests>=2.32.4",
"sqlalchemy>=2.0", # SQL database ORM
"starlette>=0.46.2", # For FastAPI CLI
@@ -70,9 +70,9 @@ dev = [
# go/keep-sorted start
"flit>=3.10.0",
"isort>=6.0.0",
+ "mypy>=1.15.0",
"pyink>=24.10.0",
"pylint>=2.6.0",
- "mypy>=1.15.0",
# go/keep-sorted end
]
@@ -87,6 +87,7 @@ eval = [
"google-cloud-aiplatform[evaluation]>=1.87.0",
"pandas>=2.2.3",
"tabulate>=0.9.0",
+ "rouge-score>=0.1.2",
# go/keep-sorted end
]
@@ -94,10 +95,10 @@ test = [
# go/keep-sorted start
"anthropic>=0.43.0", # For anthropic model tests
"langchain-community>=0.3.17",
- "langgraph>=0.2.60", # For LangGraphAgent
+ # langgraph 0.5 removed langgraph.graph.graph which we depend on
+ "langgraph>=0.2.60, <= 0.4.10", # For LangGraphAgent
"litellm>=1.71.2", # For LiteLLM tests
"llama-index-readers-file>=0.4.0", # For retrieval tests
-
"pytest-asyncio>=0.25.0",
"pytest-mock>=3.14.0",
"pytest-xdist>=3.6.1",
diff --git a/src/google/adk/a2a/converters/event_converter.py b/src/google/adk/a2a/converters/event_converter.py
new file mode 100644
index 000000000..356808aa3
--- /dev/null
+++ b/src/google/adk/a2a/converters/event_converter.py
@@ -0,0 +1,603 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from datetime import datetime
+from datetime import timezone
+import logging
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+import uuid
+
+from a2a.server.events import Event as A2AEvent
+from a2a.types import Artifact
+from a2a.types import DataPart
+from a2a.types import Message
+from a2a.types import Part as A2APart
+from a2a.types import Role
+from a2a.types import Task
+from a2a.types import TaskArtifactUpdateEvent
+from a2a.types import TaskState
+from a2a.types import TaskStatus
+from a2a.types import TaskStatusUpdateEvent
+from a2a.types import TextPart
+from google.genai import types as genai_types
+
+from ...agents.invocation_context import InvocationContext
+from ...events.event import Event
+from ...flows.llm_flows.functions import REQUEST_EUC_FUNCTION_CALL_NAME
+from ...utils.feature_decorator import experimental
+from .part_converter import A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY
+from .part_converter import A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
+from .part_converter import A2A_DATA_PART_METADATA_TYPE_KEY
+from .part_converter import convert_a2a_part_to_genai_part
+from .part_converter import convert_genai_part_to_a2a_part
+from .utils import _get_adk_metadata_key
+
+# Constants
+
+ARTIFACT_ID_SEPARATOR = "-"
+DEFAULT_ERROR_MESSAGE = "An error occurred during processing"
+
+# Logger
+logger = logging.getLogger("google_adk." + __name__)
+
+
+def _serialize_metadata_value(value: Any) -> str:
+ """Safely serializes metadata values to string format.
+
+ Args:
+ value: The value to serialize.
+
+ Returns:
+ String representation of the value.
+ """
+ if hasattr(value, "model_dump"):
+ try:
+ return value.model_dump(exclude_none=True, by_alias=True)
+ except Exception as e:
+ logger.warning("Failed to serialize metadata value: %s", e)
+ return str(value)
+ return str(value)
+
+
+def _get_context_metadata(
+ event: Event, invocation_context: InvocationContext
+) -> Dict[str, str]:
+ """Gets the context metadata for the event.
+
+ Args:
+ event: The ADK event to extract metadata from.
+ invocation_context: The invocation context containing session information.
+
+ Returns:
+ A dictionary containing the context metadata.
+
+ Raises:
+ ValueError: If required fields are missing from event or context.
+ """
+ if not event:
+ raise ValueError("Event cannot be None")
+ if not invocation_context:
+ raise ValueError("Invocation context cannot be None")
+
+ try:
+ metadata = {
+ _get_adk_metadata_key("app_name"): invocation_context.app_name,
+ _get_adk_metadata_key("user_id"): invocation_context.user_id,
+ _get_adk_metadata_key("session_id"): invocation_context.session.id,
+ _get_adk_metadata_key("invocation_id"): event.invocation_id,
+ _get_adk_metadata_key("author"): event.author,
+ }
+
+ # Add optional metadata fields if present
+ optional_fields = [
+ ("branch", event.branch),
+ ("grounding_metadata", event.grounding_metadata),
+ ("custom_metadata", event.custom_metadata),
+ ("usage_metadata", event.usage_metadata),
+ ("error_code", event.error_code),
+ ]
+
+ for field_name, field_value in optional_fields:
+ if field_value is not None:
+ metadata[_get_adk_metadata_key(field_name)] = _serialize_metadata_value(
+ field_value
+ )
+
+ return metadata
+
+ except Exception as e:
+ logger.error("Failed to create context metadata: %s", e)
+ raise
+
+
+def _create_artifact_id(
+ app_name: str, user_id: str, session_id: str, filename: str, version: int
+) -> str:
+ """Creates a unique artifact ID.
+
+ Args:
+ app_name: The application name.
+ user_id: The user ID.
+ session_id: The session ID.
+ filename: The artifact filename.
+ version: The artifact version.
+
+ Returns:
+ A unique artifact ID string.
+ """
+ components = [app_name, user_id, session_id, filename, str(version)]
+ return ARTIFACT_ID_SEPARATOR.join(components)
+
+
+def _convert_artifact_to_a2a_events(
+ event: Event,
+ invocation_context: InvocationContext,
+ filename: str,
+ version: int,
+ task_id: Optional[str] = None,
+ context_id: Optional[str] = None,
+) -> TaskArtifactUpdateEvent:
+ """Converts a new artifact version to an A2A TaskArtifactUpdateEvent.
+
+ Args:
+ event: The ADK event containing the artifact information.
+ invocation_context: The invocation context.
+ filename: The name of the artifact file.
+ version: The version number of the artifact.
+ task_id: Optional task ID to associate with the generated event.
+ context_id: Optional context ID to associate with the generated event.
+
+ Returns:
+ A TaskArtifactUpdateEvent representing the artifact update.
+
+ Raises:
+ ValueError: If required parameters are invalid.
+ RuntimeError: If artifact loading fails.
+ """
+ if not filename:
+ raise ValueError("Filename cannot be empty")
+ if version < 0:
+ raise ValueError("Version must be non-negative")
+
+ try:
+ artifact_part = invocation_context.artifact_service.load_artifact(
+ app_name=invocation_context.app_name,
+ user_id=invocation_context.user_id,
+ session_id=invocation_context.session.id,
+ filename=filename,
+ version=version,
+ )
+
+ converted_part = convert_genai_part_to_a2a_part(part=artifact_part)
+ if not converted_part:
+ raise RuntimeError(f"Failed to convert artifact part for {filename}")
+
+ artifact_id = _create_artifact_id(
+ invocation_context.app_name,
+ invocation_context.user_id,
+ invocation_context.session.id,
+ filename,
+ version,
+ )
+
+ return TaskArtifactUpdateEvent(
+ taskId=task_id,
+ append=False,
+ contextId=context_id,
+ lastChunk=True,
+ artifact=Artifact(
+ artifactId=artifact_id,
+ name=filename,
+ metadata={
+ "filename": filename,
+ "version": version,
+ },
+ parts=[converted_part],
+ ),
+ )
+ except Exception as e:
+ logger.error(
+ "Failed to convert artifact for %s, version %s: %s",
+ filename,
+ version,
+ e,
+ )
+ raise RuntimeError(f"Artifact conversion failed: {e}") from e
+
+
+def _process_long_running_tool(a2a_part: A2APart, event: Event) -> None:
+ """Processes long-running tool metadata for an A2A part.
+
+ Args:
+ a2a_part: The A2A part to potentially mark as long-running.
+ event: The ADK event containing long-running tool information.
+ """
+ if (
+ isinstance(a2a_part.root, DataPart)
+ and event.long_running_tool_ids
+ and a2a_part.root.metadata
+ and a2a_part.root.metadata.get(
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)
+ )
+ == A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
+ and a2a_part.root.data.get("id") in event.long_running_tool_ids
+ ):
+ a2a_part.root.metadata[
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY)
+ ] = True
+
+
+def convert_a2a_task_to_event(
+ a2a_task: Task,
+ author: Optional[str] = None,
+ invocation_context: Optional[InvocationContext] = None,
+) -> Event:
+ """Converts an A2A task to an ADK event.
+
+ Args:
+ a2a_task: The A2A task to convert. Must not be None.
+ author: The author of the event. Defaults to "a2a agent" if not provided.
+ invocation_context: The invocation context containing session information.
+ If provided, the branch will be set from the context.
+
+ Returns:
+ An ADK Event object representing the converted task.
+
+ Raises:
+ ValueError: If a2a_task is None.
+ RuntimeError: If conversion of the underlying message fails.
+ """
+ if a2a_task is None:
+ raise ValueError("A2A task cannot be None")
+
+ try:
+ # Extract message from task status or history
+ message = None
+ if a2a_task.status and a2a_task.status.message:
+ message = a2a_task.status.message
+ elif a2a_task.history:
+ message = a2a_task.history[-1]
+
+ # Convert message if available
+ if message:
+ try:
+ return convert_a2a_message_to_event(message, author, invocation_context)
+ except Exception as e:
+ logger.error("Failed to convert A2A task message to event: %s", e)
+ raise RuntimeError(f"Failed to convert task message: {e}") from e
+
+ # Create minimal event if no message is available
+ return Event(
+ invocation_id=(
+ invocation_context.invocation_id
+ if invocation_context
+ else str(uuid.uuid4())
+ ),
+ author=author or "a2a agent",
+ branch=invocation_context.branch if invocation_context else None,
+ )
+
+ except Exception as e:
+ logger.error("Failed to convert A2A task to event: %s", e)
+ raise
+
+
+@experimental
+def convert_a2a_message_to_event(
+ a2a_message: Message,
+ author: Optional[str] = None,
+ invocation_context: Optional[InvocationContext] = None,
+) -> Event:
+ """Converts an A2A message to an ADK event.
+
+ Args:
+ a2a_message: The A2A message to convert. Must not be None.
+ author: The author of the event. Defaults to "a2a agent" if not provided.
+ invocation_context: The invocation context containing session information.
+ If provided, the branch will be set from the context.
+
+ Returns:
+ An ADK Event object with converted content and long-running tool metadata.
+
+ Raises:
+ ValueError: If a2a_message is None.
+ RuntimeError: If conversion of message parts fails.
+ """
+ if a2a_message is None:
+ raise ValueError("A2A message cannot be None")
+
+ if not a2a_message.parts:
+ logger.warning(
+ "A2A message has no parts, creating event with empty content"
+ )
+ return Event(
+ invocation_id=(
+ invocation_context.invocation_id
+ if invocation_context
+ else str(uuid.uuid4())
+ ),
+ author=author or "a2a agent",
+ branch=invocation_context.branch if invocation_context else None,
+ content=genai_types.Content(role="model", parts=[]),
+ )
+
+ try:
+ parts = []
+ long_running_tool_ids = set()
+
+ for a2a_part in a2a_message.parts:
+ try:
+ part = convert_a2a_part_to_genai_part(a2a_part)
+ if part is None:
+ logger.warning("Failed to convert A2A part, skipping: %s", a2a_part)
+ continue
+
+ # Check for long-running tools
+ if (
+ a2a_part.root.metadata
+ and a2a_part.root.metadata.get(
+ _get_adk_metadata_key(
+ A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY
+ )
+ )
+ is True
+ ):
+ long_running_tool_ids.add(part.function_call.id)
+
+ parts.append(part)
+
+ except Exception as e:
+ logger.error("Failed to convert A2A part: %s, error: %s", a2a_part, e)
+ # Continue processing other parts instead of failing completely
+ continue
+
+ if not parts:
+ logger.warning(
+ "No parts could be converted from A2A message %s", a2a_message
+ )
+
+ return Event(
+ invocation_id=(
+ invocation_context.invocation_id
+ if invocation_context
+ else str(uuid.uuid4())
+ ),
+ author=author or "a2a agent",
+ branch=invocation_context.branch if invocation_context else None,
+ long_running_tool_ids=long_running_tool_ids
+ if long_running_tool_ids
+ else None,
+ content=genai_types.Content(
+ role="model",
+ parts=parts,
+ ),
+ )
+
+ except Exception as e:
+ logger.error("Failed to convert A2A message to event: %s", e)
+ raise RuntimeError(f"Failed to convert message: {e}") from e
+
+
+@experimental
+def convert_event_to_a2a_message(
+ event: Event, invocation_context: InvocationContext, role: Role = Role.agent
+) -> Optional[Message]:
+ """Converts an ADK event to an A2A message.
+
+ Args:
+ event: The ADK event to convert.
+ invocation_context: The invocation context.
+ role: The A2A role to assign to the message. Defaults to Role.agent.
+
+ Returns:
+ An A2A Message if the event has content, None otherwise.
+
+ Raises:
+ ValueError: If required parameters are invalid.
+ """
+ if not event:
+ raise ValueError("Event cannot be None")
+ if not invocation_context:
+ raise ValueError("Invocation context cannot be None")
+
+ if not event.content or not event.content.parts:
+ return None
+
+ try:
+ a2a_parts = []
+ for part in event.content.parts:
+ a2a_part = convert_genai_part_to_a2a_part(part)
+ if a2a_part:
+ a2a_parts.append(a2a_part)
+ _process_long_running_tool(a2a_part, event)
+
+ if a2a_parts:
+ return Message(messageId=str(uuid.uuid4()), role=role, parts=a2a_parts)
+
+ except Exception as e:
+ logger.error("Failed to convert event to status message: %s", e)
+ raise
+
+ return None
+
+
+def _create_error_status_event(
+ event: Event,
+ invocation_context: InvocationContext,
+ task_id: Optional[str] = None,
+ context_id: Optional[str] = None,
+) -> TaskStatusUpdateEvent:
+ """Creates a TaskStatusUpdateEvent for error scenarios.
+
+ Args:
+ event: The ADK event containing error information.
+ invocation_context: The invocation context.
+ task_id: Optional task ID to use for generated events.
+ context_id: Optional Context ID to use for generated events.
+
+ Returns:
+ A TaskStatusUpdateEvent with FAILED state.
+ """
+ error_message = getattr(event, "error_message", None) or DEFAULT_ERROR_MESSAGE
+
+ # Get context metadata and add error code
+ event_metadata = _get_context_metadata(event, invocation_context)
+ if event.error_code:
+ event_metadata[_get_adk_metadata_key("error_code")] = str(event.error_code)
+
+ return TaskStatusUpdateEvent(
+ taskId=task_id,
+ contextId=context_id,
+ metadata=event_metadata,
+ status=TaskStatus(
+ state=TaskState.failed,
+ message=Message(
+ messageId=str(uuid.uuid4()),
+ role=Role.agent,
+ parts=[TextPart(text=error_message)],
+ metadata={
+ _get_adk_metadata_key("error_code"): str(event.error_code)
+ }
+ if event.error_code
+ else {},
+ ),
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ ),
+ final=False,
+ )
+
+
+def _create_status_update_event(
+ message: Message,
+ invocation_context: InvocationContext,
+ event: Event,
+ task_id: Optional[str] = None,
+ context_id: Optional[str] = None,
+) -> TaskStatusUpdateEvent:
+ """Creates a TaskStatusUpdateEvent for running scenarios.
+
+ Args:
+ message: The A2A message to include.
+ invocation_context: The invocation context.
+ event: The ADK event.
+ task_id: Optional task ID to use for generated events.
+ context_id: Optional Context ID to use for generated events.
+
+
+ Returns:
+ A TaskStatusUpdateEvent with WORKING state, or AUTH_REQUIRED /
+ INPUT_REQUIRED when the message carries long-running tool calls.
+ """
+ status = TaskStatus(
+ state=TaskState.working,
+ message=message,
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ )
+
+ if any(
+ part.root.metadata.get(
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)
+ )
+ == A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
+ and part.root.metadata.get(
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY)
+ )
+ is True
+ and part.root.data.get("name") == REQUEST_EUC_FUNCTION_CALL_NAME
+ for part in message.parts
+ if part.root.metadata
+ ):
+ status.state = TaskState.auth_required
+ elif any(
+ part.root.metadata.get(
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)
+ )
+ == A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
+ and part.root.metadata.get(
+ _get_adk_metadata_key(A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY)
+ )
+ is True
+ for part in message.parts
+ if part.root.metadata
+ ):
+ status.state = TaskState.input_required
+
+ return TaskStatusUpdateEvent(
+ taskId=task_id,
+ contextId=context_id,
+ status=status,
+ metadata=_get_context_metadata(event, invocation_context),
+ final=False,
+ )
+
+
+@experimental
+def convert_event_to_a2a_events(
+ event: Event,
+ invocation_context: InvocationContext,
+ task_id: Optional[str] = None,
+ context_id: Optional[str] = None,
+) -> List[A2AEvent]:
+ """Converts a GenAI event to a list of A2A events.
+
+ Args:
+ event: The ADK event to convert.
+ invocation_context: The invocation context.
+ task_id: Optional task ID to use for generated events.
+ context_id: Optional Context ID to use for generated events.
+
+ Returns:
+ A list of A2A events representing the converted ADK event.
+
+ Raises:
+ ValueError: If required parameters are invalid.
+ """
+ if not event:
+ raise ValueError("Event cannot be None")
+ if not invocation_context:
+ raise ValueError("Invocation context cannot be None")
+
+ a2a_events = []
+
+ try:
+ # Handle artifact deltas
+ if event.actions.artifact_delta:
+ for filename, version in event.actions.artifact_delta.items():
+ artifact_event = _convert_artifact_to_a2a_events(
+ event, invocation_context, filename, version, task_id, context_id
+ )
+ a2a_events.append(artifact_event)
+
+ # Handle error scenarios
+ if event.error_code:
+ error_event = _create_error_status_event(
+ event, invocation_context, task_id, context_id
+ )
+ a2a_events.append(error_event)
+
+ # Handle regular message content
+ message = convert_event_to_a2a_message(event, invocation_context)
+ if message:
+ running_event = _create_status_update_event(
+ message, invocation_context, event, task_id, context_id
+ )
+ a2a_events.append(running_event)
+
+ except Exception as e:
+ logger.error("Failed to convert event to A2A events: %s", e)
+ raise
+
+ return a2a_events
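The public entry point here is `convert_event_to_a2a_events`; the sketch below shows how it is meant to be driven (this mirrors what `A2aAgentExecutor._handle_request`, added later in this change, does for every ADK event):

```python
# Illustrative only: stream events from an ADK Runner and publish the
# converted A2A events onto the server's event queue.
from google.adk.a2a.converters.event_converter import convert_event_to_a2a_events


async def publish_adk_events(
    runner, run_args, invocation_context, event_queue, task_id=None, context_id=None
):
  async for adk_event in runner.run_async(**run_args):
    for a2a_event in convert_event_to_a2a_events(
        adk_event, invocation_context, task_id, context_id
    ):
      await event_queue.enqueue_event(a2a_event)
```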
diff --git a/src/google/adk/a2a/converters/part_converter.py b/src/google/adk/a2a/converters/part_converter.py
index 1c51fd7c1..04387cccf 100644
--- a/src/google/adk/a2a/converters/part_converter.py
+++ b/src/google/adk/a2a/converters/part_converter.py
@@ -18,23 +18,40 @@
from __future__ import annotations
+import base64
import json
import logging
from typing import Optional
-from a2a import types as a2a_types
+from .utils import _get_adk_metadata_key
+
+try:
+ from a2a import types as a2a_types
+except ImportError as e:
+ import sys
+
+ if sys.version_info < (3, 10):
+ raise ImportError(
+ 'A2A requires Python 3.10 or above. Please upgrade your Python version.'
+ ) from e
+ else:
+ raise e
+
from google.genai import types as genai_types
-from ...utils.feature_decorator import working_in_progress
+from ...utils.feature_decorator import experimental
logger = logging.getLogger('google_adk.' + __name__)
A2A_DATA_PART_METADATA_TYPE_KEY = 'type'
+A2A_DATA_PART_METADATA_IS_LONG_RUNNING_KEY = 'is_long_running'
A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL = 'function_call'
A2A_DATA_PART_METADATA_TYPE_FUNCTION_RESPONSE = 'function_response'
+A2A_DATA_PART_METADATA_TYPE_CODE_EXECUTION_RESULT = 'code_execution_result'
+A2A_DATA_PART_METADATA_TYPE_EXECUTABLE_CODE = 'executable_code'
-@working_in_progress
+@experimental
def convert_a2a_part_to_genai_part(
a2a_part: a2a_types.Part,
) -> Optional[genai_types.Part]:
@@ -54,7 +71,8 @@ def convert_a2a_part_to_genai_part(
elif isinstance(part.file, a2a_types.FileWithBytes):
return genai_types.Part(
inline_data=genai_types.Blob(
- data=part.file.bytes.encode('utf-8'), mime_type=part.file.mimeType
+ data=base64.b64decode(part.file.bytes),
+ mime_type=part.file.mimeType,
)
)
else:
@@ -71,9 +89,13 @@ def convert_a2a_part_to_genai_part(
# response.
# TODO once A2A defined how to suervice such information, migrate below
# logic accordinlgy
- if part.metadata and A2A_DATA_PART_METADATA_TYPE_KEY in part.metadata:
+ if (
+ part.metadata
+ and _get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)
+ in part.metadata
+ ):
if (
- part.metadata[A2A_DATA_PART_METADATA_TYPE_KEY]
+ part.metadata[_get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)]
== A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
):
return genai_types.Part(
@@ -82,7 +104,7 @@ def convert_a2a_part_to_genai_part(
)
)
if (
- part.metadata[A2A_DATA_PART_METADATA_TYPE_KEY]
+ part.metadata[_get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)]
== A2A_DATA_PART_METADATA_TYPE_FUNCTION_RESPONSE
):
return genai_types.Part(
@@ -90,6 +112,24 @@ def convert_a2a_part_to_genai_part(
part.data, by_alias=True
)
)
+ if (
+ part.metadata[_get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)]
+ == A2A_DATA_PART_METADATA_TYPE_CODE_EXECUTION_RESULT
+ ):
+ return genai_types.Part(
+ code_execution_result=genai_types.CodeExecutionResult.model_validate(
+ part.data, by_alias=True
+ )
+ )
+ if (
+ part.metadata[_get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY)]
+ == A2A_DATA_PART_METADATA_TYPE_EXECUTABLE_CODE
+ ):
+ return genai_types.Part(
+ executable_code=genai_types.ExecutableCode.model_validate(
+ part.data, by_alias=True
+ )
+ )
return genai_types.Part(text=json.dumps(part.data))
logger.warning(
@@ -100,32 +140,45 @@ def convert_a2a_part_to_genai_part(
return None
-@working_in_progress
+@experimental
def convert_genai_part_to_a2a_part(
part: genai_types.Part,
) -> Optional[a2a_types.Part]:
"""Convert a Google GenAI Part to an A2A Part."""
+
if part.text:
- return a2a_types.TextPart(text=part.text)
+ a2a_part = a2a_types.TextPart(text=part.text)
+ if part.thought is not None:
+ a2a_part.metadata = {_get_adk_metadata_key('thought'): part.thought}
+ return a2a_types.Part(root=a2a_part)
if part.file_data:
- return a2a_types.FilePart(
- file=a2a_types.FileWithUri(
- uri=part.file_data.file_uri,
- mimeType=part.file_data.mime_type,
+ return a2a_types.Part(
+ root=a2a_types.FilePart(
+ file=a2a_types.FileWithUri(
+ uri=part.file_data.file_uri,
+ mimeType=part.file_data.mime_type,
+ )
)
)
if part.inline_data:
- return a2a_types.Part(
- root=a2a_types.FilePart(
- file=a2a_types.FileWithBytes(
- bytes=part.inline_data.data,
- mimeType=part.inline_data.mime_type,
- )
+ a2a_part = a2a_types.FilePart(
+ file=a2a_types.FileWithBytes(
+ bytes=base64.b64encode(part.inline_data.data).decode('utf-8'),
+ mimeType=part.inline_data.mime_type,
)
)
+ if part.video_metadata:
+ a2a_part.metadata = {
+ _get_adk_metadata_key(
+ 'video_metadata'
+ ): part.video_metadata.model_dump(by_alias=True, exclude_none=True)
+ }
+
+ return a2a_types.Part(root=a2a_part)
+
# Conver the funcall and function reponse to A2A DataPart.
# This is mainly for converting human in the loop and auth request and
# response.
@@ -138,9 +191,9 @@ def convert_genai_part_to_a2a_part(
by_alias=True, exclude_none=True
),
metadata={
- A2A_DATA_PART_METADATA_TYPE_KEY: (
- A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
- )
+ _get_adk_metadata_key(
+ A2A_DATA_PART_METADATA_TYPE_KEY
+ ): A2A_DATA_PART_METADATA_TYPE_FUNCTION_CALL
},
)
)
@@ -152,9 +205,37 @@ def convert_genai_part_to_a2a_part(
by_alias=True, exclude_none=True
),
metadata={
- A2A_DATA_PART_METADATA_TYPE_KEY: (
- A2A_DATA_PART_METADATA_TYPE_FUNCTION_RESPONSE
- )
+ _get_adk_metadata_key(
+ A2A_DATA_PART_METADATA_TYPE_KEY
+ ): A2A_DATA_PART_METADATA_TYPE_FUNCTION_RESPONSE
+ },
+ )
+ )
+
+ if part.code_execution_result:
+ return a2a_types.Part(
+ root=a2a_types.DataPart(
+ data=part.code_execution_result.model_dump(
+ by_alias=True, exclude_none=True
+ ),
+ metadata={
+ _get_adk_metadata_key(
+ A2A_DATA_PART_METADATA_TYPE_KEY
+ ): A2A_DATA_PART_METADATA_TYPE_CODE_EXECUTION_RESULT
+ },
+ )
+ )
+
+ if part.executable_code:
+ return a2a_types.Part(
+ root=a2a_types.DataPart(
+ data=part.executable_code.model_dump(
+ by_alias=True, exclude_none=True
+ ),
+ metadata={
+ _get_adk_metadata_key(
+ A2A_DATA_PART_METADATA_TYPE_KEY
+ ): A2A_DATA_PART_METADATA_TYPE_EXECUTABLE_CODE
},
)
)
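A quick round-trip check of the two converters (a sketch, assuming the `a2a` package is installed and using the module path introduced in this change):

```python
from google.genai import types as genai_types

from google.adk.a2a.converters.part_converter import convert_a2a_part_to_genai_part
from google.adk.a2a.converters.part_converter import convert_genai_part_to_a2a_part

# Text parts survive a round trip; file parts are base64-encoded on the way
# out and decoded on the way back in (per the FileWithBytes handling above).
genai_part = genai_types.Part(text="hello")
a2a_part = convert_genai_part_to_a2a_part(genai_part)  # Part(root=TextPart(...))
assert convert_a2a_part_to_genai_part(a2a_part).text == "hello"
```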
diff --git a/src/google/adk/a2a/converters/request_converter.py b/src/google/adk/a2a/converters/request_converter.py
new file mode 100644
index 000000000..168de49b7
--- /dev/null
+++ b/src/google/adk/a2a/converters/request_converter.py
@@ -0,0 +1,70 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import sys
+from typing import Any
+
+try:
+ from a2a.server.agent_execution import RequestContext
+except ImportError as e:
+ if sys.version_info < (3, 10):
+ raise ImportError(
+ 'A2A Tool requires Python 3.10 or above. Please upgrade your Python'
+ ' version.'
+ ) from e
+ else:
+ raise e
+
+from google.genai import types as genai_types
+
+from ...runners import RunConfig
+from ...utils.feature_decorator import experimental
+from .part_converter import convert_a2a_part_to_genai_part
+
+
+def _get_user_id(request: RequestContext) -> str:
+ # Get user from call context if available (auth is enabled on a2a server)
+ if (
+ request.call_context
+ and request.call_context.user
+ and request.call_context.user.user_name
+ ):
+ return request.call_context.user.user_name
+
+ # Get user from context id
+ return f'A2A_USER_{request.context_id}'
+
+
+@experimental
+def convert_a2a_request_to_adk_run_args(
+ request: RequestContext,
+) -> dict[str, Any]:
+
+ if not request.message:
+ raise ValueError('Request message cannot be None')
+
+ return {
+ 'user_id': _get_user_id(request),
+ 'session_id': request.context_id,
+ 'new_message': genai_types.Content(
+ role='user',
+ parts=[
+ convert_a2a_part_to_genai_part(part)
+ for part in request.message.parts
+ ],
+ ),
+ 'run_config': RunConfig(),
+ }
diff --git a/src/google/adk/a2a/converters/utils.py b/src/google/adk/a2a/converters/utils.py
new file mode 100644
index 000000000..acb2581d4
--- /dev/null
+++ b/src/google/adk/a2a/converters/utils.py
@@ -0,0 +1,89 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+ADK_METADATA_KEY_PREFIX = "adk_"
+ADK_CONTEXT_ID_PREFIX = "ADK"
+ADK_CONTEXT_ID_SEPARATOR = "/"
+
+
+def _get_adk_metadata_key(key: str) -> str:
+ """Gets the A2A event metadata key for the given key.
+
+ Args:
+ key: The metadata key to prefix.
+
+ Returns:
+ The prefixed metadata key.
+
+ Raises:
+ ValueError: If key is empty or None.
+ """
+ if not key:
+ raise ValueError("Metadata key cannot be empty or None")
+ return f"{ADK_METADATA_KEY_PREFIX}{key}"
+
+
+def _to_a2a_context_id(app_name: str, user_id: str, session_id: str) -> str:
+ """Converts app name, user id and session id to an A2A context id.
+
+ Args:
+ app_name: The app name.
+ user_id: The user id.
+ session_id: The session id.
+
+ Returns:
+ The A2A context id.
+
+ Raises:
+ ValueError: If any of the input parameters are empty or None.
+ """
+ if not all([app_name, user_id, session_id]):
+ raise ValueError(
+ "All parameters (app_name, user_id, session_id) must be non-empty"
+ )
+ return ADK_CONTEXT_ID_SEPARATOR.join(
+ [ADK_CONTEXT_ID_PREFIX, app_name, user_id, session_id]
+ )
+
+
+def _from_a2a_context_id(context_id: str) -> tuple[str, str, str]:
+ """Converts an A2A context id to app name, user id and session id.
+ if context_id is None, return None, None, None
+ if context_id is not None, but not in the format of
+ ADK$app_name$user_id$session_id, return None, None, None
+
+ Args:
+ context_id: The A2A context id.
+
+ Returns:
+ The app name, user id and session id.
+ """
+ if not context_id:
+ return None, None, None
+
+ try:
+ parts = context_id.split(ADK_CONTEXT_ID_SEPARATOR)
+ if len(parts) != 4:
+ return None, None, None
+
+ prefix, app_name, user_id, session_id = parts
+ if prefix == ADK_CONTEXT_ID_PREFIX and app_name and user_id and session_id:
+ return app_name, user_id, session_id
+ except ValueError:
+ # Handle any split errors gracefully
+ pass
+
+ return None, None, None
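The two context-id helpers are simple inverses of each other; for illustration:

```python
from google.adk.a2a.converters.utils import _from_a2a_context_id
from google.adk.a2a.converters.utils import _to_a2a_context_id

ctx = _to_a2a_context_id("my_app", "user_42", "session_7")
print(ctx)                                 # ADK/my_app/user_42/session_7
print(_from_a2a_context_id(ctx))           # ('my_app', 'user_42', 'session_7')
print(_from_a2a_context_id("not-a-ctx"))   # (None, None, None)
```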
diff --git a/src/google/adk/a2a/executor/__init__.py b/src/google/adk/a2a/executor/__init__.py
new file mode 100644
index 000000000..0a2669d7a
--- /dev/null
+++ b/src/google/adk/a2a/executor/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/google/adk/a2a/executor/a2a_agent_executor.py b/src/google/adk/a2a/executor/a2a_agent_executor.py
new file mode 100644
index 000000000..1d5545ed3
--- /dev/null
+++ b/src/google/adk/a2a/executor/a2a_agent_executor.py
@@ -0,0 +1,260 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from datetime import datetime
+from datetime import timezone
+import inspect
+import logging
+from typing import Any
+from typing import Awaitable
+from typing import Callable
+from typing import Optional
+import uuid
+
+try:
+ from a2a.server.agent_execution import AgentExecutor
+ from a2a.server.agent_execution.context import RequestContext
+ from a2a.server.events.event_queue import EventQueue
+ from a2a.types import Message
+ from a2a.types import Role
+ from a2a.types import TaskState
+ from a2a.types import TaskStatus
+ from a2a.types import TaskStatusUpdateEvent
+ from a2a.types import TextPart
+
+except ImportError as e:
+ import sys
+
+ if sys.version_info < (3, 10):
+ raise ImportError(
+ 'A2A requires Python 3.10 or above. Please upgrade your Python version.'
+ ) from e
+ else:
+ raise e
+from google.adk.runners import Runner
+from pydantic import BaseModel
+from typing_extensions import override
+
+from ...utils.feature_decorator import experimental
+from ..converters.event_converter import convert_event_to_a2a_events
+from ..converters.request_converter import convert_a2a_request_to_adk_run_args
+from ..converters.utils import _get_adk_metadata_key
+from .task_result_aggregator import TaskResultAggregator
+
+logger = logging.getLogger('google_adk.' + __name__)
+
+
+@experimental
+class A2aAgentExecutorConfig(BaseModel):
+ """Configuration for the A2aAgentExecutor."""
+
+ pass
+
+
+@experimental
+class A2aAgentExecutor(AgentExecutor):
+ """An AgentExecutor that runs an ADK Agent against an A2A request and
+ publishes updates to an event queue.
+ """
+
+ def __init__(
+ self,
+ *,
+ runner: Runner | Callable[..., Runner | Awaitable[Runner]],
+ config: Optional[A2aAgentExecutorConfig] = None,
+ ):
+ super().__init__()
+ self._runner = runner
+ self._config = config
+
+ async def _resolve_runner(self) -> Runner:
+ """Resolve the runner, handling cases where it's a callable that returns a Runner."""
+ # If already resolved and cached, return it
+ if isinstance(self._runner, Runner):
+ return self._runner
+ if callable(self._runner):
+ # Call the function to get the runner
+ result = self._runner()
+
+ # Handle async callables
+ if inspect.iscoroutine(result):
+ resolved_runner = await result
+ else:
+ resolved_runner = result
+
+ # Cache the resolved runner for future calls
+ self._runner = resolved_runner
+ return resolved_runner
+
+ raise TypeError(
+ 'Runner must be a Runner instance or a callable that returns a'
+ f' Runner, got {type(self._runner)}'
+ )
+
+ @override
+ async def cancel(self, context: RequestContext, event_queue: EventQueue):
+ """Cancel the execution."""
+ # TODO: Implement proper cancellation logic if needed
+ raise NotImplementedError('Cancellation is not supported')
+
+ @override
+ async def execute(
+ self,
+ context: RequestContext,
+ event_queue: EventQueue,
+ ):
+ """Executes an A2A request and publishes updates to the event queue
+ specified. It runs as follows:
+ * Takes the input from the A2A request
+ * Converts the input to ADK content and runs the ADK agent
+ * Collects the output events of the underlying ADK agent
+ * Converts the ADK output events into A2A task updates
+ * Publishes the updates back to the A2A server via the event queue
+ """
+ if not context.message:
+ raise ValueError('A2A request must have a message')
+
+ # for new task, create a task submitted event
+ if not context.current_task:
+ await event_queue.enqueue_event(
+ TaskStatusUpdateEvent(
+ taskId=context.task_id,
+ status=TaskStatus(
+ state=TaskState.submitted,
+ message=context.message,
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ ),
+ contextId=context.context_id,
+ final=False,
+ )
+ )
+
+ # Handle the request and publish updates to the event queue
+ try:
+ await self._handle_request(context, event_queue)
+ except Exception as e:
+ logger.error('Error handling A2A request: %s', e, exc_info=True)
+ # Publish failure event
+ try:
+ await event_queue.enqueue_event(
+ TaskStatusUpdateEvent(
+ taskId=context.task_id,
+ status=TaskStatus(
+ state=TaskState.failed,
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ message=Message(
+ messageId=str(uuid.uuid4()),
+ role=Role.agent,
+ parts=[TextPart(text=str(e))],
+ ),
+ ),
+ contextId=context.context_id,
+ final=True,
+ )
+ )
+ except Exception as enqueue_error:
+ logger.error(
+ 'Failed to publish failure event: %s', enqueue_error, exc_info=True
+ )
+
+ async def _handle_request(
+ self,
+ context: RequestContext,
+ event_queue: EventQueue,
+ ):
+ # Resolve the runner instance
+ runner = await self._resolve_runner()
+
+ # Convert the a2a request to ADK run args
+ run_args = convert_a2a_request_to_adk_run_args(context)
+
+ # ensure the session exists
+ session = await self._prepare_session(context, run_args, runner)
+
+ # create invocation context
+ invocation_context = runner._new_invocation_context(
+ session=session,
+ new_message=run_args['new_message'],
+ run_config=run_args['run_config'],
+ )
+
+ # publish the task working event
+ await event_queue.enqueue_event(
+ TaskStatusUpdateEvent(
+ taskId=context.task_id,
+ status=TaskStatus(
+ state=TaskState.working,
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ ),
+ contextId=context.context_id,
+ final=False,
+ metadata={
+ _get_adk_metadata_key('app_name'): runner.app_name,
+ _get_adk_metadata_key('user_id'): run_args['user_id'],
+ _get_adk_metadata_key('session_id'): run_args['session_id'],
+ },
+ )
+ )
+
+ task_result_aggregator = TaskResultAggregator()
+ async for adk_event in runner.run_async(**run_args):
+ for a2a_event in convert_event_to_a2a_events(
+ adk_event, invocation_context, context.task_id, context.context_id
+ ):
+ task_result_aggregator.process_event(a2a_event)
+ await event_queue.enqueue_event(a2a_event)
+
+ # publish the task result event - this is final
+ await event_queue.enqueue_event(
+ TaskStatusUpdateEvent(
+ taskId=context.task_id,
+ status=TaskStatus(
+ state=(
+ task_result_aggregator.task_state
+ if task_result_aggregator.task_state != TaskState.working
+ else TaskState.completed
+ ),
+ timestamp=datetime.now(timezone.utc).isoformat(),
+ message=task_result_aggregator.task_status_message,
+ ),
+ contextId=context.context_id,
+ final=True,
+ )
+ )
+
+ async def _prepare_session(
+ self, context: RequestContext, run_args: dict[str, Any], runner: Runner
+ ):
+
+ session_id = run_args['session_id']
+ # create a new session if not exists
+ user_id = run_args['user_id']
+ session = await runner.session_service.get_session(
+ app_name=runner.app_name,
+ user_id=user_id,
+ session_id=session_id,
+ )
+ if session is None:
+ session = await runner.session_service.create_session(
+ app_name=runner.app_name,
+ user_id=user_id,
+ state={},
+ session_id=session_id,
+ )
+ # Update run_args with the new session_id
+ run_args['session_id'] = session.id
+
+ return session
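The executor accepts either a ready `Runner` or a sync/async factory that builds one; `_resolve_runner` resolves the factory lazily and caches the result. A construction sketch (illustrative; the agent, model name, and app name are placeholders):

```python
from google.adk.agents import Agent
from google.adk.runners import InMemoryRunner

from google.adk.a2a.executor.a2a_agent_executor import A2aAgentExecutor

root_agent = Agent(
    name="hello_agent",
    model="gemini-2.0-flash",  # placeholder model
    instruction="Answer briefly.",
)


async def build_runner() -> InMemoryRunner:
  # Built only when the first A2A request arrives, then cached by the executor.
  return InMemoryRunner(agent=root_agent, app_name="hello_a2a_app")


executor = A2aAgentExecutor(runner=build_runner)
```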
diff --git a/src/google/adk/a2a/executor/task_result_aggregator.py b/src/google/adk/a2a/executor/task_result_aggregator.py
new file mode 100644
index 000000000..202e80927
--- /dev/null
+++ b/src/google/adk/a2a/executor/task_result_aggregator.py
@@ -0,0 +1,71 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from a2a.server.events import Event
+from a2a.types import Message
+from a2a.types import TaskState
+from a2a.types import TaskStatusUpdateEvent
+
+from ...utils.feature_decorator import experimental
+
+
+@experimental
+class TaskResultAggregator:
+ """Aggregates the task status updates and provides the final task state."""
+
+ def __init__(self):
+ self._task_state = TaskState.working
+ self._task_status_message = None
+
+ def process_event(self, event: Event):
+ """Process an event from the agent run and detect signals about the task status.
+ Priority of task state:
+ - failed
+ - auth_required
+ - input_required
+ - working
+ """
+ if isinstance(event, TaskStatusUpdateEvent):
+ if event.status.state == TaskState.failed:
+ self._task_state = TaskState.failed
+ self._task_status_message = event.status.message
+ elif (
+ event.status.state == TaskState.auth_required
+ and self._task_state != TaskState.failed
+ ):
+ self._task_state = TaskState.auth_required
+ self._task_status_message = event.status.message
+ elif (
+ event.status.state == TaskState.input_required
+ and self._task_state
+ not in (TaskState.failed, TaskState.auth_required)
+ ):
+ self._task_state = TaskState.input_required
+ self._task_status_message = event.status.message
+ # Terminal-ish states are recorded in the branches above. While the
+ # aggregated state is still working, record the latest message and keep the
+ # event's state as working, because any other state may terminate the event
+ # aggregation in the A2A request handler.
+ elif self._task_state == TaskState.working:
+ self._task_status_message = event.status.message
+ event.status.state = TaskState.working
+
+ @property
+ def task_state(self) -> TaskState:
+ return self._task_state
+
+ @property
+ def task_status_message(self) -> Message | None:
+ return self._task_status_message
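The priority logic (failed > auth_required > input_required > working) can be exercised directly; a small sketch, assuming the `a2a` package is available:

```python
from a2a.types import TaskState
from a2a.types import TaskStatus
from a2a.types import TaskStatusUpdateEvent

from google.adk.a2a.executor.task_result_aggregator import TaskResultAggregator


def status_event(state: TaskState) -> TaskStatusUpdateEvent:
  return TaskStatusUpdateEvent(
      taskId="task-1",
      contextId="ctx-1",
      final=False,
      status=TaskStatus(state=state),
  )


aggregator = TaskResultAggregator()
aggregator.process_event(status_event(TaskState.input_required))
aggregator.process_event(status_event(TaskState.auth_required))
aggregator.process_event(status_event(TaskState.working))
print(aggregator.task_state)  # TaskState.auth_required
```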
diff --git a/src/google/adk/a2a/logs/__init__.py b/src/google/adk/a2a/logs/__init__.py
new file mode 100644
index 000000000..0a2669d7a
--- /dev/null
+++ b/src/google/adk/a2a/logs/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/google/adk/a2a/logs/log_utils.py b/src/google/adk/a2a/logs/log_utils.py
new file mode 100644
index 000000000..b3891514c
--- /dev/null
+++ b/src/google/adk/a2a/logs/log_utils.py
@@ -0,0 +1,349 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for structured A2A request and response logging."""
+
+from __future__ import annotations
+
+import json
+import sys
+
+try:
+ from a2a.types import DataPart as A2ADataPart
+ from a2a.types import Message as A2AMessage
+ from a2a.types import Part as A2APart
+ from a2a.types import SendMessageRequest
+ from a2a.types import SendMessageResponse
+ from a2a.types import Task as A2ATask
+ from a2a.types import TextPart as A2ATextPart
+except ImportError as e:
+ if sys.version_info < (3, 10):
+ raise ImportError(
+ "A2A Tool requires Python 3.10 or above. Please upgrade your Python"
+ " version."
+ ) from e
+ else:
+ raise e
+
+
+# Constants
+_NEW_LINE = "\n"
+_EXCLUDED_PART_FIELD = {"file": {"bytes"}}
+
+
+def _is_a2a_task(obj) -> bool:
+ """Check if an object is an A2A Task, with fallback for isinstance issues."""
+ try:
+ return isinstance(obj, A2ATask)
+ except (TypeError, AttributeError):
+ return type(obj).__name__ == "Task" and hasattr(obj, "status")
+
+
+def _is_a2a_message(obj) -> bool:
+ """Check if an object is an A2A Message, with fallback for isinstance issues."""
+ try:
+ return isinstance(obj, A2AMessage)
+ except (TypeError, AttributeError):
+ return type(obj).__name__ == "Message" and hasattr(obj, "role")
+
+
+def _is_a2a_text_part(obj) -> bool:
+ """Check if an object is an A2A TextPart, with fallback for isinstance issues."""
+ try:
+ return isinstance(obj, A2ATextPart)
+ except (TypeError, AttributeError):
+ return type(obj).__name__ == "TextPart" and hasattr(obj, "text")
+
+
+def _is_a2a_data_part(obj) -> bool:
+ """Check if an object is an A2A DataPart, with fallback for isinstance issues."""
+ try:
+ return isinstance(obj, A2ADataPart)
+ except (TypeError, AttributeError):
+ return type(obj).__name__ == "DataPart" and hasattr(obj, "data")
+
+
+def build_message_part_log(part: A2APart) -> str:
+ """Builds a log representation of an A2A message part.
+
+ Args:
+ part: The A2A message part to log.
+
+ Returns:
+ A string representation of the part.
+ """
+ part_content = ""
+ if _is_a2a_text_part(part.root):
+ part_content = f"TextPart: {part.root.text[:100]}" + (
+ "..." if len(part.root.text) > 100 else ""
+ )
+ elif _is_a2a_data_part(part.root):
+ # For data parts, show the data keys but exclude large values
+ data_summary = {
+ k: (
+ f"<{type(v).__name__}>"
+ if isinstance(v, (dict, list)) and len(str(v)) > 100
+ else v
+ )
+ for k, v in part.root.data.items()
+ }
+ part_content = f"DataPart: {json.dumps(data_summary, indent=2)}"
+ else:
+ part_content = (
+ f"{type(part.root).__name__}:"
+ f" {part.model_dump_json(exclude_none=True, exclude=_EXCLUDED_PART_FIELD)}"
+ )
+
+ # Add part metadata if it exists
+ if hasattr(part.root, "metadata") and part.root.metadata:
+ metadata_str = json.dumps(part.root.metadata, indent=2).replace(
+ "\n", "\n "
+ )
+ part_content += f"\n Part Metadata: {metadata_str}"
+
+ return part_content
+
+
+def build_a2a_request_log(req: SendMessageRequest) -> str:
+ """Builds a structured log representation of an A2A request.
+
+ Args:
+ req: The A2A SendMessageRequest to log.
+
+ Returns:
+ A formatted string representation of the request.
+ """
+ # Message parts logs
+ message_parts_logs = []
+ if req.params.message.parts:
+ for i, part in enumerate(req.params.message.parts):
+ part_log = build_message_part_log(part)
+ # Replace any internal newlines with indented newlines to maintain formatting
+ part_log_formatted = part_log.replace("\n", "\n ")
+ message_parts_logs.append(f"Part {i}: {part_log_formatted}")
+
+ # Configuration logs
+ config_log = "None"
+ if req.params.configuration:
+ config_data = {
+ "acceptedOutputModes": req.params.configuration.acceptedOutputModes,
+ "blocking": req.params.configuration.blocking,
+ "historyLength": req.params.configuration.historyLength,
+ "pushNotificationConfig": bool(
+ req.params.configuration.pushNotificationConfig
+ ),
+ }
+ config_log = json.dumps(config_data, indent=2)
+
+ # Build message metadata section
+ message_metadata_section = ""
+ if req.params.message.metadata:
+ message_metadata_section = f"""
+ Metadata:
+ {json.dumps(req.params.message.metadata, indent=2).replace(chr(10), chr(10) + ' ')}"""
+
+ # Build optional sections
+ optional_sections = []
+
+ if req.params.metadata:
+ optional_sections.append(
+ f"""-----------------------------------------------------------
+Metadata:
+{json.dumps(req.params.metadata, indent=2)}"""
+ )
+
+ optional_sections_str = _NEW_LINE.join(optional_sections)
+
+ return f"""
+A2A Request:
+-----------------------------------------------------------
+Request ID: {req.id}
+Method: {req.method}
+JSON-RPC: {req.jsonrpc}
+-----------------------------------------------------------
+Message:
+ ID: {req.params.message.messageId}
+ Role: {req.params.message.role}
+ Task ID: {req.params.message.taskId}
+ Context ID: {req.params.message.contextId}{message_metadata_section}
+-----------------------------------------------------------
+Message Parts:
+{_NEW_LINE.join(message_parts_logs) if message_parts_logs else "No parts"}
+-----------------------------------------------------------
+Configuration:
+{config_log}
+{optional_sections_str}
+-----------------------------------------------------------
+"""
+
+
+def build_a2a_response_log(resp: SendMessageResponse) -> str:
+ """Builds a structured log representation of an A2A response.
+
+ Args:
+ resp: The A2A SendMessageResponse to log.
+
+ Returns:
+ A formatted string representation of the response.
+ """
+ # Handle error responses
+ if hasattr(resp.root, "error"):
+ return f"""
+A2A Response:
+-----------------------------------------------------------
+Type: ERROR
+Error Code: {resp.root.error.code}
+Error Message: {resp.root.error.message}
+Error Data: {json.dumps(resp.root.error.data, indent=2) if resp.root.error.data else "None"}
+-----------------------------------------------------------
+Response ID: {resp.root.id}
+JSON-RPC: {resp.root.jsonrpc}
+-----------------------------------------------------------
+"""
+
+ # Handle success responses
+ result = resp.root.result
+ result_type = type(result).__name__
+
+ # Build result details based on type
+ result_details = []
+
+ if _is_a2a_task(result):
+ result_details.extend([
+ f"Task ID: {result.id}",
+ f"Context ID: {result.contextId}",
+ f"Status State: {result.status.state}",
+ f"Status Timestamp: {result.status.timestamp}",
+ f"History Length: {len(result.history) if result.history else 0}",
+ f"Artifacts Count: {len(result.artifacts) if result.artifacts else 0}",
+ ])
+
+ # Add task metadata if it exists
+ if result.metadata:
+ result_details.append("Task Metadata:")
+ metadata_formatted = json.dumps(result.metadata, indent=2).replace(
+ "\n", "\n "
+ )
+ result_details.append(f" {metadata_formatted}")
+
+ elif _is_a2a_message(result):
+ result_details.extend([
+ f"Message ID: {result.messageId}",
+ f"Role: {result.role}",
+ f"Task ID: {result.taskId}",
+ f"Context ID: {result.contextId}",
+ ])
+
+ # Add message parts
+ if result.parts:
+ result_details.append("Message Parts:")
+ for i, part in enumerate(result.parts):
+ part_log = build_message_part_log(part)
+ # Replace any internal newlines with indented newlines to maintain formatting
+ part_log_formatted = part_log.replace("\n", "\n ")
+ result_details.append(f" Part {i}: {part_log_formatted}")
+
+ # Add metadata if it exists
+ if result.metadata:
+ result_details.append("Metadata:")
+ metadata_formatted = json.dumps(result.metadata, indent=2).replace(
+ "\n", "\n "
+ )
+ result_details.append(f" {metadata_formatted}")
+
+ else:
+ # Handle other result types by showing their JSON representation
+ if hasattr(result, "model_dump_json"):
+ try:
+ result_json = result.model_dump_json()
+ result_details.append(f"JSON Data: {result_json}")
+ except Exception:
+        result_details.append("JSON Data: <unavailable>")
+
+ # Build status message section
+ status_message_section = "None"
+ if _is_a2a_task(result) and result.status.message:
+ status_parts_logs = []
+ if result.status.message.parts:
+ for i, part in enumerate(result.status.message.parts):
+ part_log = build_message_part_log(part)
+ # Replace any internal newlines with indented newlines to maintain formatting
+ part_log_formatted = part_log.replace("\n", "\n ")
+ status_parts_logs.append(f"Part {i}: {part_log_formatted}")
+
+ # Build status message metadata section
+ status_metadata_section = ""
+ if result.status.message.metadata:
+ status_metadata_section = f"""
+Metadata:
+{json.dumps(result.status.message.metadata, indent=2)}"""
+
+ status_message_section = f"""ID: {result.status.message.messageId}
+Role: {result.status.message.role}
+Task ID: {result.status.message.taskId}
+Context ID: {result.status.message.contextId}
+Message Parts:
+{_NEW_LINE.join(status_parts_logs) if status_parts_logs else "No parts"}{status_metadata_section}"""
+
+ # Build history section
+ history_section = "No history"
+ if _is_a2a_task(result) and result.history:
+ history_logs = []
+ for i, message in enumerate(result.history):
+ message_parts_logs = []
+ if message.parts:
+ for j, part in enumerate(message.parts):
+ part_log = build_message_part_log(part)
+ # Replace any internal newlines with indented newlines to maintain formatting
+ part_log_formatted = part_log.replace("\n", "\n ")
+ message_parts_logs.append(f" Part {j}: {part_log_formatted}")
+
+ # Build message metadata section
+ message_metadata_section = ""
+ if message.metadata:
+ message_metadata_section = f"""
+ Metadata:
+ {json.dumps(message.metadata, indent=2).replace(chr(10), chr(10) + ' ')}"""
+
+ history_logs.append(
+ f"""Message {i + 1}:
+ ID: {message.messageId}
+ Role: {message.role}
+ Task ID: {message.taskId}
+ Context ID: {message.contextId}
+ Message Parts:
+{_NEW_LINE.join(message_parts_logs) if message_parts_logs else " No parts"}{message_metadata_section}"""
+ )
+
+ history_section = _NEW_LINE.join(history_logs)
+
+ return f"""
+A2A Response:
+-----------------------------------------------------------
+Type: SUCCESS
+Result Type: {result_type}
+-----------------------------------------------------------
+Result Details:
+{_NEW_LINE.join(result_details)}
+-----------------------------------------------------------
+Status Message:
+{status_message_section}
+-----------------------------------------------------------
+History:
+{history_section}
+-----------------------------------------------------------
+Response ID: {resp.root.id}
+JSON-RPC: {resp.root.jsonrpc}
+-----------------------------------------------------------
+"""
diff --git a/src/google/adk/agents/llm_agent.py b/src/google/adk/agents/llm_agent.py
index fe145a60e..a5c859e26 100644
--- a/src/google/adk/agents/llm_agent.py
+++ b/src/google/adk/agents/llm_agent.py
@@ -161,10 +161,12 @@ class LlmAgent(BaseAgent):
# LLM-based agent transfer configs - End
include_contents: Literal['default', 'none'] = 'default'
- """Whether to include contents in the model request.
+ """Controls content inclusion in model requests.
- When set to 'none', the model request will not include any contents, such as
- user messages, tool results, etc.
+ Options:
+ default: Model receives relevant conversation history
+    none: Model receives no prior history and operates solely on the current
+      instruction and input
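+
+  Example (illustrative; the agent name and model name are placeholders):
+
+  ```python
+  agent = LlmAgent(
+      name="stateless_helper",
+      model="gemini-2.0-flash",
+      instruction="Answer using only the current user message.",
+      include_contents="none",
+  )
+  ```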
"""
# Controlled input/output configurations - Start
@@ -429,16 +431,31 @@ def _llm_flow(self) -> BaseLlmFlow:
def __maybe_save_output_to_state(self, event: Event):
"""Saves the model output to state if needed."""
+ # skip if the event was authored by some other agent (e.g. current agent
+ # transferred to another agent)
+ if event.author != self.name:
+ logger.debug(
+ 'Skipping output save for agent %s: event authored by %s',
+ self.name,
+ event.author,
+ )
+ return
if (
self.output_key
and event.is_final_response()
and event.content
and event.content.parts
):
result = ''.join(
[part.text if part.text else '' for part in event.content.parts]
)
if self.output_schema:
+ # If the result from the final chunk is just whitespace or empty,
+ # it means this is an empty final chunk of a stream.
+ # Do not attempt to parse it as JSON.
+ if not result.strip():
+ return
result = self.output_schema.model_validate_json(result).model_dump(
exclude_none=True
)
diff --git a/src/google/adk/agents/remote_a2a_agent.py b/src/google/adk/agents/remote_a2a_agent.py
new file mode 100644
index 000000000..b9f765576
--- /dev/null
+++ b/src/google/adk/agents/remote_a2a_agent.py
@@ -0,0 +1,532 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import json
+import logging
+from pathlib import Path
+from typing import Any
+from typing import AsyncGenerator
+from typing import Optional
+from typing import Union
+from urllib.parse import urlparse
+import uuid
+
+try:
+ from a2a.client import A2AClient
+ from a2a.client.client import A2ACardResolver # Import A2ACardResolver
+ from a2a.types import AgentCard
+ from a2a.types import Message as A2AMessage
+ from a2a.types import MessageSendParams as A2AMessageSendParams
+ from a2a.types import Part as A2APart
+ from a2a.types import Role
+ from a2a.types import SendMessageRequest
+ from a2a.types import SendMessageSuccessResponse
+ from a2a.types import Task as A2ATask
+
+except ImportError as e:
+ import sys
+
+ if sys.version_info < (3, 10):
+ raise ImportError(
+ "A2A requires Python 3.10 or above. Please upgrade your Python version."
+ ) from e
+ else:
+ raise e
+
+from google.genai import types as genai_types
+import httpx
+
+from ..a2a.converters.event_converter import convert_a2a_message_to_event
+from ..a2a.converters.event_converter import convert_a2a_task_to_event
+from ..a2a.converters.event_converter import convert_event_to_a2a_message
+from ..a2a.converters.part_converter import convert_genai_part_to_a2a_part
+from ..a2a.logs.log_utils import build_a2a_request_log
+from ..a2a.logs.log_utils import build_a2a_response_log
+from ..agents.invocation_context import InvocationContext
+from ..events.event import Event
+from ..flows.llm_flows.contents import _convert_foreign_event
+from ..flows.llm_flows.contents import _is_other_agent_reply
+from ..flows.llm_flows.functions import find_matching_function_call
+from ..utils.feature_decorator import experimental
+from .base_agent import BaseAgent
+
+# Constants
+A2A_METADATA_PREFIX = "a2a:"
+DEFAULT_TIMEOUT = 600.0
+
+
+logger = logging.getLogger("google_adk." + __name__)
+
+
+@experimental
+class AgentCardResolutionError(Exception):
+ """Raised when agent card resolution fails."""
+
+ pass
+
+
+@experimental
+class A2AClientError(Exception):
+ """Raised when A2A client operations fail."""
+
+ pass
+
+
+@experimental
+class RemoteA2aAgent(BaseAgent):
+ """Agent that communicates with a remote A2A agent via A2A client.
+
+ This agent supports multiple ways to specify the remote agent:
+ 1. Direct AgentCard object
+ 2. URL to agent card JSON
+ 3. File path to agent card JSON
+
+ The agent handles:
+ - Agent card resolution and validation
+ - HTTP client management with proper resource cleanup
+ - A2A message conversion and error handling
+ - Session state management across requests
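+
+  Example (illustrative sketch; the agent card URL is a placeholder):
+
+  ```python
+  remote_agent = RemoteA2aAgent(
+      name="prime_checker",
+      agent_card="https://example.com/a2a/.well-known/agent.json",
+      description="Checks whether numbers are prime via a remote A2A agent.",
+  )
+
+  # Use remote_agent like any other ADK agent, e.g. as a sub-agent.
+  # If the agent created its own HTTP client, release it when done:
+  await remote_agent.cleanup()
+  ```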
+ """
+
+ def __init__(
+ self,
+ name: str,
+ agent_card: Union[AgentCard, str],
+ description: str = "",
+ httpx_client: Optional[httpx.AsyncClient] = None,
+ timeout: float = DEFAULT_TIMEOUT,
+ **kwargs: Any,
+ ) -> None:
+ """Initialize RemoteA2aAgent.
+
+ Args:
+ name: Agent name (must be unique identifier)
+ agent_card: AgentCard object, URL string, or file path string
+ description: Agent description (auto-populated from card if empty)
+ httpx_client: Optional shared HTTP client (will create own if not provided)
+ timeout: HTTP timeout in seconds
+ **kwargs: Additional arguments passed to BaseAgent
+
+ Raises:
+ ValueError: If name is invalid or agent_card is None
+ TypeError: If agent_card is not a supported type
+ """
+ super().__init__(name=name, description=description, **kwargs)
+
+ if agent_card is None:
+ raise ValueError("agent_card cannot be None")
+
+ self._agent_card: Optional[AgentCard] = None
+ self._agent_card_source: Optional[str] = None
+ self._rpc_url: Optional[str] = None
+ self._a2a_client: Optional[A2AClient] = None
+ self._httpx_client = httpx_client
+ self._httpx_client_needs_cleanup = httpx_client is None
+ self._timeout = timeout
+ self._is_resolved = False
+
+ # Validate and store agent card reference
+ if isinstance(agent_card, AgentCard):
+ self._agent_card = agent_card
+ elif isinstance(agent_card, str):
+ if not agent_card.strip():
+ raise ValueError("agent_card string cannot be empty")
+ self._agent_card_source = agent_card.strip()
+ else:
+ raise TypeError(
+ "agent_card must be AgentCard, URL string, or file path string, "
+ f"got {type(agent_card)}"
+ )
+
+ async def _ensure_httpx_client(self) -> httpx.AsyncClient:
+ """Ensure HTTP client is available and properly configured."""
+ if not self._httpx_client:
+ self._httpx_client = httpx.AsyncClient(
+ timeout=httpx.Timeout(timeout=self._timeout)
+ )
+ self._httpx_client_needs_cleanup = True
+ return self._httpx_client
+
+ async def _resolve_agent_card_from_url(self, url: str) -> AgentCard:
+ """Resolve agent card from URL."""
+ try:
+ parsed_url = urlparse(url)
+ if not parsed_url.scheme or not parsed_url.netloc:
+ raise ValueError(f"Invalid URL format: {url}")
+
+ base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+ relative_card_path = parsed_url.path
+
+ httpx_client = await self._ensure_httpx_client()
+ resolver = A2ACardResolver(
+ httpx_client=httpx_client,
+ base_url=base_url,
+ )
+ return await resolver.get_agent_card(
+ relative_card_path=relative_card_path
+ )
+ except Exception as e:
+ raise AgentCardResolutionError(
+ f"Failed to resolve AgentCard from URL {url}: {e}"
+ ) from e
+
+ async def _resolve_agent_card_from_file(self, file_path: str) -> AgentCard:
+ """Resolve agent card from file path."""
+ try:
+ path = Path(file_path)
+ if not path.exists():
+ raise FileNotFoundError(f"Agent card file not found: {file_path}")
+ if not path.is_file():
+ raise ValueError(f"Path is not a file: {file_path}")
+
+ with path.open("r", encoding="utf-8") as f:
+ agent_json_data = json.load(f)
+ return AgentCard(**agent_json_data)
+ except json.JSONDecodeError as e:
+ raise AgentCardResolutionError(
+ f"Invalid JSON in agent card file {file_path}: {e}"
+ ) from e
+ except Exception as e:
+ raise AgentCardResolutionError(
+ f"Failed to resolve AgentCard from file {file_path}: {e}"
+ ) from e
+
+ async def _resolve_agent_card(self) -> AgentCard:
+ """Resolve agent card from source."""
+
+ # Determine if source is URL or file path
+ if self._agent_card_source.startswith(("http://", "https://")):
+ return await self._resolve_agent_card_from_url(self._agent_card_source)
+ else:
+ return await self._resolve_agent_card_from_file(self._agent_card_source)
+
+ async def _validate_agent_card(self, agent_card: AgentCard) -> None:
+ """Validate resolved agent card."""
+ if not agent_card.url:
+ raise AgentCardResolutionError(
+ "Agent card must have a valid URL for RPC communication"
+ )
+
+ # Additional validation can be added here
+ try:
+ parsed_url = urlparse(str(agent_card.url))
+ if not parsed_url.scheme or not parsed_url.netloc:
+ raise ValueError("Invalid RPC URL format")
+ except Exception as e:
+ raise AgentCardResolutionError(
+ f"Invalid RPC URL in agent card: {agent_card.url}, error: {e}"
+ ) from e
+
+ async def _ensure_resolved(self) -> None:
+ """Ensures agent card is resolved, RPC URL is determined, and A2A client is initialized."""
+ if self._is_resolved:
+ return
+
+ try:
+ # Resolve agent card if needed
+ if not self._agent_card:
+ self._agent_card = await self._resolve_agent_card()
+
+ # Validate agent card
+ await self._validate_agent_card(self._agent_card)
+
+ # Set RPC URL
+ self._rpc_url = str(self._agent_card.url)
+
+ # Update description if empty
+ if not self.description and self._agent_card.description:
+ self.description = self._agent_card.description
+
+ # Initialize A2A client
+ if not self._a2a_client:
+ httpx_client = await self._ensure_httpx_client()
+ self._a2a_client = A2AClient(
+ httpx_client=httpx_client,
+ agent_card=self._agent_card,
+ url=self._rpc_url,
+ )
+
+ self._is_resolved = True
+ logger.info("Successfully resolved remote A2A agent: %s", self.name)
+
+ except Exception as e:
+ logger.error("Failed to resolve remote A2A agent %s: %s", self.name, e)
+ raise AgentCardResolutionError(
+ f"Failed to initialize remote A2A agent {self.name}: {e}"
+ ) from e
+
+ def _create_a2a_request_for_user_function_response(
+ self, ctx: InvocationContext
+ ) -> Optional[SendMessageRequest]:
+ """Create A2A request for user function response if applicable.
+
+ Args:
+ ctx: The invocation context
+
+ Returns:
+ SendMessageRequest if function response found, None otherwise
+ """
+ if not ctx.session.events or ctx.session.events[-1].author != "user":
+ return None
+ function_call_event = find_matching_function_call(ctx.session.events)
+ if not function_call_event:
+ return None
+
+ a2a_message = convert_event_to_a2a_message(
+ ctx.session.events[-1], ctx, Role.user
+ )
+    if function_call_event.custom_metadata:
+      a2a_message.taskId = function_call_event.custom_metadata.get(
+          A2A_METADATA_PREFIX + "task_id"
+      )
+      a2a_message.contextId = function_call_event.custom_metadata.get(
+          A2A_METADATA_PREFIX + "context_id"
+      )
+
+ return SendMessageRequest(
+ id=str(uuid.uuid4()),
+ params=A2AMessageSendParams(
+ message=a2a_message,
+ ),
+ )
+
+ def _construct_message_parts_from_session(
+ self, ctx: InvocationContext
+  ) -> tuple[list[A2APart], Optional[str]]:
+ """Construct A2A message parts from session events.
+
+ Args:
+ ctx: The invocation context
+
+ Returns:
+      A tuple of (A2A parts extracted from session events, context ID or None).
+ """
+ message_parts: list[A2APart] = []
+ context_id = None
+ for event in reversed(ctx.session.events):
+ if _is_other_agent_reply(self.name, event):
+ event = _convert_foreign_event(event)
+ elif event.author == self.name:
+        # Stop at content generated by the current A2A agent, since it should
+        # already be part of the remote session.
+        if event.custom_metadata:
+          context_id = event.custom_metadata.get(
+              A2A_METADATA_PREFIX + "context_id"
+          )
+ break
+
+ if not event.content or not event.content.parts:
+ continue
+
+ for part in event.content.parts:
+
+ converted_part = convert_genai_part_to_a2a_part(part)
+ if converted_part:
+ message_parts.append(converted_part)
+ else:
+ logger.warning("Failed to convert part to A2A format: %s", part)
+
+ return message_parts[::-1], context_id
+
+ async def _handle_a2a_response(
+ self, a2a_response: Any, ctx: InvocationContext
+ ) -> Event:
+ """Handle A2A response and convert to Event.
+
+ Args:
+ a2a_response: The A2A response object
+ ctx: The invocation context
+
+ Returns:
+ Event object representing the response
+ """
+ try:
+ if isinstance(a2a_response.root, SendMessageSuccessResponse):
+ if a2a_response.root.result:
+ if isinstance(a2a_response.root.result, A2ATask):
+ event = convert_a2a_task_to_event(
+ a2a_response.root.result, self.name, ctx
+ )
+ event.custom_metadata = event.custom_metadata or {}
+ event.custom_metadata[A2A_METADATA_PREFIX + "task_id"] = (
+ a2a_response.root.result.id
+ )
+
+ else:
+ event = convert_a2a_message_to_event(
+ a2a_response.root.result, self.name, ctx
+ )
+ event.custom_metadata = event.custom_metadata or {}
+ if a2a_response.root.result.taskId:
+ event.custom_metadata[A2A_METADATA_PREFIX + "task_id"] = (
+ a2a_response.root.result.taskId
+ )
+
+ if a2a_response.root.result.contextId:
+ event.custom_metadata[A2A_METADATA_PREFIX + "context_id"] = (
+ a2a_response.root.result.contextId
+ )
+
+ else:
+ logger.warning("A2A response has no result: %s", a2a_response.root)
+ event = Event(
+ author=self.name,
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ )
+ else:
+ # Handle error response
+ error_response = a2a_response.root
+ logger.error(
+ "A2A request failed with error: %s, data: %s",
+ error_response.error.message,
+ error_response.error.data,
+ )
+ event = Event(
+ author=self.name,
+ error_message=error_response.error.message,
+ error_code=str(error_response.error.code),
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ )
+
+ return event
+ except Exception as e:
+ logger.error("Failed to handle A2A response: %s", e)
+ return Event(
+ author=self.name,
+ error_message=f"Failed to process A2A response: {e}",
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ )
+
+ async def _run_async_impl(
+ self, ctx: InvocationContext
+ ) -> AsyncGenerator[Event, None]:
+ """Core implementation for async agent execution."""
+ try:
+ await self._ensure_resolved()
+ except Exception as e:
+ yield Event(
+ author=self.name,
+ error_message=f"Failed to initialize remote A2A agent: {e}",
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ )
+ return
+
+ # Create A2A request for function response or regular message
+ a2a_request = self._create_a2a_request_for_user_function_response(ctx)
+ if not a2a_request:
+ message_parts, context_id = self._construct_message_parts_from_session(
+ ctx
+ )
+
+ if not message_parts:
+ logger.warning(
+ "No parts to send to remote A2A agent. Emitting empty event."
+ )
+ yield Event(
+ author=self.name,
+ content=genai_types.Content(),
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ )
+ return
+
+ a2a_request = SendMessageRequest(
+ id=str(uuid.uuid4()),
+ params=A2AMessageSendParams(
+ message=A2AMessage(
+ messageId=str(uuid.uuid4()),
+ parts=message_parts,
+ role="user",
+ contextId=context_id,
+ )
+ ),
+ )
+
+ logger.info(build_a2a_request_log(a2a_request))
+
+ try:
+ a2a_response = await self._a2a_client.send_message(request=a2a_request)
+ logger.info(build_a2a_response_log(a2a_response))
+
+ event = await self._handle_a2a_response(a2a_response, ctx)
+
+ # Add metadata about the request and response
+ event.custom_metadata = event.custom_metadata or {}
+ event.custom_metadata[A2A_METADATA_PREFIX + "request"] = (
+ a2a_request.model_dump(exclude_none=True, by_alias=True)
+ )
+ event.custom_metadata[A2A_METADATA_PREFIX + "response"] = (
+ a2a_response.root.model_dump(exclude_none=True, by_alias=True)
+ )
+
+ yield event
+
+ except Exception as e:
+ error_message = f"A2A request failed: {e}"
+ logger.error(error_message)
+
+ yield Event(
+ author=self.name,
+ error_message=error_message,
+ invocation_id=ctx.invocation_id,
+ branch=ctx.branch,
+ custom_metadata={
+ A2A_METADATA_PREFIX
+ + "request": a2a_request.model_dump(
+ exclude_none=True, by_alias=True
+ ),
+ A2A_METADATA_PREFIX + "error": error_message,
+ },
+ )
+
+ async def _run_live_impl(
+ self, ctx: InvocationContext
+ ) -> AsyncGenerator[Event, None]:
+ """Core implementation for live agent execution (not implemented)."""
+ raise NotImplementedError(
+ f"_run_live_impl for {type(self)} via A2A is not implemented."
+ )
+ # This makes the function an async generator but the yield is still unreachable
+ yield
+
+ async def cleanup(self) -> None:
+ """Clean up resources, especially the HTTP client if owned by this agent."""
+ if self._httpx_client_needs_cleanup and self._httpx_client:
+ try:
+ await self._httpx_client.aclose()
+ logger.debug("Closed HTTP client for agent %s", self.name)
+ except Exception as e:
+ logger.warning(
+ "Failed to close HTTP client for agent %s: %s",
+ self.name,
+ e,
+ )
+ finally:
+ self._httpx_client = None
diff --git a/src/google/adk/artifacts/gcs_artifact_service.py b/src/google/adk/artifacts/gcs_artifact_service.py
index e4af21e15..35aa88622 100644
--- a/src/google/adk/artifacts/gcs_artifact_service.py
+++ b/src/google/adk/artifacts/gcs_artifact_service.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""An artifact service implementation using Google Cloud Storage (GCS)."""
+from __future__ import annotations
import logging
from typing import Optional
@@ -151,7 +152,7 @@ async def list_artifact_keys(
self.bucket, prefix=session_prefix
)
for blob in session_blobs:
- _, _, _, filename, _ = blob.name.split("/")
+ *_, filename, _ = blob.name.split("/")
filenames.add(filename)
user_namespace_prefix = f"{app_name}/{user_id}/user/"
@@ -159,7 +160,7 @@ async def list_artifact_keys(
self.bucket, prefix=user_namespace_prefix
)
for blob in user_namespace_blobs:
- _, _, _, filename, _ = blob.name.split("/")
+ *_, filename, _ = blob.name.split("/")
filenames.add(filename)
return sorted(list(filenames))
diff --git a/src/google/adk/auth/auth_credential.py b/src/google/adk/auth/auth_credential.py
index 1009a50dd..34d04dde9 100644
--- a/src/google/adk/auth/auth_credential.py
+++ b/src/google/adk/auth/auth_credential.py
@@ -230,4 +230,3 @@ class AuthCredential(BaseModelWithConfig):
http: Optional[HttpAuth] = None
service_account: Optional[ServiceAccount] = None
oauth2: Optional[OAuth2Auth] = None
- google_oauth2_json: Optional[str] = None
diff --git a/src/google/adk/auth/auth_handler.py b/src/google/adk/auth/auth_handler.py
index 3e13cbac2..473f31413 100644
--- a/src/google/adk/auth/auth_handler.py
+++ b/src/google/adk/auth/auth_handler.py
@@ -22,7 +22,7 @@
from .auth_schemes import AuthSchemeType
from .auth_schemes import OpenIdConnectWithConfig
from .auth_tool import AuthConfig
-from .oauth2_credential_fetcher import OAuth2CredentialFetcher
+from .exchanger.oauth2_credential_exchanger import OAuth2CredentialExchanger
if TYPE_CHECKING:
from ..sessions.state import State
@@ -36,18 +36,23 @@
class AuthHandler:
+ """A handler that handles the auth flow in Agent Development Kit to help
+ orchestrate the credential request and response flow (e.g. OAuth flow)
+ This class should only be used by Agent Development Kit.
+ """
def __init__(self, auth_config: AuthConfig):
self.auth_config = auth_config
- def exchange_auth_token(
+ async def exchange_auth_token(
self,
) -> AuthCredential:
- return OAuth2CredentialFetcher(
- self.auth_config.auth_scheme, self.auth_config.exchanged_auth_credential
- ).exchange()
+ exchanger = OAuth2CredentialExchanger()
+ return await exchanger.exchange(
+ self.auth_config.exchanged_auth_credential, self.auth_config.auth_scheme
+ )
- def parse_and_store_auth_response(self, state: State) -> None:
+ async def parse_and_store_auth_response(self, state: State) -> None:
credential_key = "temp:" + self.auth_config.credential_key
@@ -60,7 +65,7 @@ def parse_and_store_auth_response(self, state: State) -> None:
):
return
- state[credential_key] = self.exchange_auth_token()
+ state[credential_key] = await self.exchange_auth_token()
def _validate(self) -> None:
if not self.auth_scheme:
diff --git a/src/google/adk/auth/auth_preprocessor.py b/src/google/adk/auth/auth_preprocessor.py
index 0c964ed96..b06774973 100644
--- a/src/google/adk/auth/auth_preprocessor.py
+++ b/src/google/adk/auth/auth_preprocessor.py
@@ -67,9 +67,9 @@ async def run_async(
# function call
request_euc_function_call_ids.add(function_call_response.id)
auth_config = AuthConfig.model_validate(function_call_response.response)
- AuthHandler(auth_config=auth_config).parse_and_store_auth_response(
- state=invocation_context.session.state
- )
+ await AuthHandler(
+ auth_config=auth_config
+ ).parse_and_store_auth_response(state=invocation_context.session.state)
break
if not request_euc_function_call_ids:
diff --git a/src/google/adk/auth/auth_tool.py b/src/google/adk/auth/auth_tool.py
index 53c571d42..0316e5258 100644
--- a/src/google/adk/auth/auth_tool.py
+++ b/src/google/adk/auth/auth_tool.py
@@ -31,12 +31,12 @@ class AuthConfig(BaseModelWithConfig):
auth_scheme: AuthScheme
"""The auth scheme used to collect credentials"""
- raw_auth_credential: AuthCredential = None
+ raw_auth_credential: Optional[AuthCredential] = None
"""The raw auth credential used to collect credentials. The raw auth
credentials are used in some auth scheme that needs to exchange auth
credentials. e.g. OAuth2 and OIDC. For other auth scheme, it could be None.
"""
- exchanged_auth_credential: AuthCredential = None
+ exchanged_auth_credential: Optional[AuthCredential] = None
"""The exchanged auth credential used to collect credentials. adk and client
will work together to fill it. For those auth scheme that doesn't need to
exchange auth credentials, e.g. API key, service account etc. It's filled by
diff --git a/src/google/adk/auth/credential_manager.py b/src/google/adk/auth/credential_manager.py
new file mode 100644
index 000000000..0dbf006ab
--- /dev/null
+++ b/src/google/adk/auth/credential_manager.py
@@ -0,0 +1,261 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..tools.tool_context import ToolContext
+from ..utils.feature_decorator import experimental
+from .auth_credential import AuthCredential
+from .auth_credential import AuthCredentialTypes
+from .auth_schemes import AuthSchemeType
+from .auth_tool import AuthConfig
+from .exchanger.base_credential_exchanger import BaseCredentialExchanger
+from .exchanger.credential_exchanger_registry import CredentialExchangerRegistry
+from .refresher.base_credential_refresher import BaseCredentialRefresher
+from .refresher.credential_refresher_registry import CredentialRefresherRegistry
+
+
+@experimental
+class CredentialManager:
+ """Manages authentication credentials through a structured workflow.
+
+ The CredentialManager orchestrates the complete lifecycle of authentication
+ credentials, from initial loading to final preparation for use. It provides
+ a centralized interface for handling various credential types and authentication
+ schemes while maintaining proper credential hygiene (refresh, exchange, caching).
+
+ This class is only for use by Agent Development Kit.
+
+ Args:
+ auth_config: Configuration containing authentication scheme and credentials
+
+ Example:
+ ```python
+ auth_config = AuthConfig(
+ auth_scheme=oauth2_scheme,
+ raw_auth_credential=service_account_credential
+ )
+ manager = CredentialManager(auth_config)
+
+ # Register custom exchanger if needed
+ manager.register_credential_exchanger(
+ AuthCredentialTypes.CUSTOM_TYPE,
+ CustomCredentialExchanger()
+ )
+
+ # Register custom refresher if needed
+ manager.register_credential_refresher(
+ AuthCredentialTypes.CUSTOM_TYPE,
+ CustomCredentialRefresher()
+ )
+
+ # Load and prepare credential
+    credential = await manager.get_auth_credential(tool_context)
+ ```
+ """
+
+ def __init__(
+ self,
+ auth_config: AuthConfig,
+ ):
+ self._auth_config = auth_config
+ self._exchanger_registry = CredentialExchangerRegistry()
+ self._refresher_registry = CredentialRefresherRegistry()
+
+ # Register default exchangers and refreshers
+ # TODO: support service account credential exchanger
+ from .refresher.oauth2_credential_refresher import OAuth2CredentialRefresher
+
+ oauth2_refresher = OAuth2CredentialRefresher()
+ self._refresher_registry.register(
+ AuthCredentialTypes.OAUTH2, oauth2_refresher
+ )
+ self._refresher_registry.register(
+ AuthCredentialTypes.OPEN_ID_CONNECT, oauth2_refresher
+ )
+
+ def register_credential_exchanger(
+ self,
+ credential_type: AuthCredentialTypes,
+ exchanger_instance: BaseCredentialExchanger,
+ ) -> None:
+ """Register a credential exchanger for a credential type.
+
+ Args:
+ credential_type: The credential type to register for.
+ exchanger_instance: The exchanger instance to register.
+ """
+ self._exchanger_registry.register(credential_type, exchanger_instance)
+
+ async def request_credential(self, tool_context: ToolContext) -> None:
+ tool_context.request_credential(self._auth_config)
+
+ async def get_auth_credential(
+ self, tool_context: ToolContext
+ ) -> Optional[AuthCredential]:
+ """Load and prepare authentication credential through a structured workflow."""
+
+ # Step 1: Validate credential configuration
+ await self._validate_credential()
+
+ # Step 2: Check if credential is already ready (no processing needed)
+ if self._is_credential_ready():
+ return self._auth_config.raw_auth_credential
+
+ # Step 3: Try to load existing processed credential
+ credential = await self._load_existing_credential(tool_context)
+
+ # Step 4: If no existing credential, load from auth response
+ # TODO instead of load from auth response, we can store auth response in
+ # credential service.
+ was_from_auth_response = False
+ if not credential:
+ credential = await self._load_from_auth_response(tool_context)
+ was_from_auth_response = True
+
+ # Step 5: If still no credential available, return None
+ if not credential:
+ return None
+
+ # Step 6: Exchange credential if needed (e.g., service account to access token)
+ credential, was_exchanged = await self._exchange_credential(credential)
+
+    # Step 7: Refresh credential if expired (skipped when it was just exchanged)
+    was_refreshed = False
+    if not was_exchanged:
+      credential, was_refreshed = await self._refresh_credential(credential)
+
+ # Step 8: Save credential if it was modified
+ if was_from_auth_response or was_exchanged or was_refreshed:
+ await self._save_credential(tool_context, credential)
+
+ return credential
+
+ async def _load_existing_credential(
+ self, tool_context: ToolContext
+ ) -> Optional[AuthCredential]:
+ """Load existing credential from credential service or cached exchanged credential."""
+
+ # Try loading from credential service first
+ credential = await self._load_from_credential_service(tool_context)
+ if credential:
+ return credential
+
+ # Check if we have a cached exchanged credential
+ if self._auth_config.exchanged_auth_credential:
+ return self._auth_config.exchanged_auth_credential
+
+ return None
+
+ async def _load_from_credential_service(
+ self, tool_context: ToolContext
+ ) -> Optional[AuthCredential]:
+ """Load credential from credential service if available."""
+ credential_service = tool_context._invocation_context.credential_service
+ if credential_service:
+      # Delegate loading to the configured credential service (async call).
+ return await credential_service.load_credential(
+ self._auth_config, tool_context
+ )
+ return None
+
+ async def _load_from_auth_response(
+ self, tool_context: ToolContext
+ ) -> Optional[AuthCredential]:
+ """Load credential from auth response in tool context."""
+ return tool_context.get_auth_response(self._auth_config)
+
+ async def _exchange_credential(
+ self, credential: AuthCredential
+ ) -> tuple[AuthCredential, bool]:
+ """Exchange credential if needed and return the credential and whether it was exchanged."""
+ exchanger = self._exchanger_registry.get_exchanger(credential.auth_type)
+ if not exchanger:
+ return credential, False
+
+ exchanged_credential = await exchanger.exchange(
+ credential, self._auth_config.auth_scheme
+ )
+ return exchanged_credential, True
+
+ async def _refresh_credential(
+ self, credential: AuthCredential
+ ) -> tuple[AuthCredential, bool]:
+ """Refresh credential if expired and return the credential and whether it was refreshed."""
+ refresher = self._refresher_registry.get_refresher(credential.auth_type)
+ if not refresher:
+ return credential, False
+
+ if await refresher.is_refresh_needed(
+ credential, self._auth_config.auth_scheme
+ ):
+ refreshed_credential = await refresher.refresh(
+ credential, self._auth_config.auth_scheme
+ )
+ return refreshed_credential, True
+
+ return credential, False
+
+ def _is_credential_ready(self) -> bool:
+ """Check if credential is ready to use without further processing."""
+ raw_credential = self._auth_config.raw_auth_credential
+ if not raw_credential:
+ return False
+
+ # Simple credentials that don't need exchange or refresh
+ return raw_credential.auth_type in (
+ AuthCredentialTypes.API_KEY,
+ AuthCredentialTypes.HTTP,
+ # Add other simple auth types as needed
+ )
+
+ async def _validate_credential(self) -> None:
+ """Validate credential configuration and raise errors if invalid."""
+ if not self._auth_config.raw_auth_credential:
+ if self._auth_config.auth_scheme.type_ in (
+ AuthSchemeType.oauth2,
+ AuthSchemeType.openIdConnect,
+ ):
+ raise ValueError(
+ "raw_auth_credential is required for auth_scheme type "
+ f"{self._auth_config.auth_scheme.type_}"
+ )
+
+ raw_credential = self._auth_config.raw_auth_credential
+ if raw_credential:
+ if (
+ raw_credential.auth_type
+ in (
+ AuthCredentialTypes.OAUTH2,
+ AuthCredentialTypes.OPEN_ID_CONNECT,
+ )
+ and not raw_credential.oauth2
+ ):
+ raise ValueError(
+ "auth_config.raw_credential.oauth2 required for credential type "
+ f"{raw_credential.auth_type}"
+ )
+ # Additional validation can be added here
+
+ async def _save_credential(
+ self, tool_context: ToolContext, credential: AuthCredential
+ ) -> None:
+ """Save credential to credential service if available."""
+ credential_service = tool_context._invocation_context.credential_service
+ if credential_service:
+ # Update the exchanged credential in config
+ self._auth_config.exchanged_auth_credential = credential
+ await credential_service.save_credential(self._auth_config, tool_context)
diff --git a/src/google/adk/auth/credential_service/session_state_credential_service.py b/src/google/adk/auth/credential_service/session_state_credential_service.py
new file mode 100644
index 000000000..e2ff7e07d
--- /dev/null
+++ b/src/google/adk/auth/credential_service/session_state_credential_service.py
@@ -0,0 +1,83 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from typing_extensions import override
+
+from ...tools.tool_context import ToolContext
+from ...utils.feature_decorator import experimental
+from ..auth_credential import AuthCredential
+from ..auth_tool import AuthConfig
+from .base_credential_service import BaseCredentialService
+
+
+@experimental
+class SessionStateCredentialService(BaseCredentialService):
+ """Class for implementation of credential service using session state as the
+ store.
+ Note: store credential in session may not be secure, use at your own risk.
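+
+  Example (illustrative sketch; ``auth_config`` and ``tool_context`` are
+  assumed to come from the surrounding tool invocation):
+
+  ```python
+  credential_service = SessionStateCredentialService()
+
+  # Load a previously stored credential (None if nothing was saved yet).
+  credential = await credential_service.load_credential(
+      auth_config, tool_context
+  )
+
+  # Persist the exchanged credential held in auth_config for later invocations.
+  await credential_service.save_credential(auth_config, tool_context)
+  ```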
+ """
+
+ @override
+ async def load_credential(
+ self,
+ auth_config: AuthConfig,
+ tool_context: ToolContext,
+ ) -> Optional[AuthCredential]:
+ """
+ Loads the credential by auth config and current tool context from the
+ backend credential store.
+
+ Args:
+ auth_config: The auth config which contains the auth scheme and auth
+ credential information. auth_config.get_credential_key will be used to
+ build the key to load the credential.
+
+ tool_context: The context of the current invocation when the tool is
+ trying to load the credential.
+
+ Returns:
+ Optional[AuthCredential]: the credential saved in the store.
+
+ """
+ return tool_context.state.get(auth_config.credential_key)
+
+ @override
+ async def save_credential(
+ self,
+ auth_config: AuthConfig,
+ tool_context: ToolContext,
+ ) -> None:
+ """
+ Saves the exchanged_auth_credential in auth config to the backend credential
+ store.
+
+ Args:
+ auth_config: The auth config which contains the auth scheme and auth
+ credential information. auth_config.get_credential_key will be used to
+ build the key to save the credential.
+
+ tool_context: The context of the current invocation when the tool is
+ trying to save the credential.
+
+ Returns:
+ None
+ """
+
+ tool_context.state[auth_config.credential_key] = (
+ auth_config.exchanged_auth_credential
+ )
diff --git a/src/google/adk/auth/exchanger/__init__.py b/src/google/adk/auth/exchanger/__init__.py
index 4226ae715..3b0fbb246 100644
--- a/src/google/adk/auth/exchanger/__init__.py
+++ b/src/google/adk/auth/exchanger/__init__.py
@@ -15,9 +15,7 @@
"""Credential exchanger module."""
from .base_credential_exchanger import BaseCredentialExchanger
-from .service_account_credential_exchanger import ServiceAccountCredentialExchanger
__all__ = [
"BaseCredentialExchanger",
- "ServiceAccountCredentialExchanger",
]
diff --git a/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py
new file mode 100644
index 000000000..768457e1a
--- /dev/null
+++ b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py
@@ -0,0 +1,104 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth2 credential exchanger implementation."""
+
+from __future__ import annotations
+
+import logging
+from typing import Optional
+
+from google.adk.auth.auth_credential import AuthCredential
+from google.adk.auth.auth_schemes import AuthScheme
+from google.adk.auth.auth_schemes import OAuthGrantType
+from google.adk.auth.oauth2_credential_util import create_oauth2_session
+from google.adk.auth.oauth2_credential_util import update_credential_with_tokens
+from google.adk.utils.feature_decorator import experimental
+from typing_extensions import override
+
+from .base_credential_exchanger import BaseCredentialExchanger
+from .base_credential_exchanger import CredentialExchangError
+
+try:
+ from authlib.integrations.requests_client import OAuth2Session
+
+ AUTHLIB_AVIALABLE = True
+except ImportError:
+ AUTHLIB_AVIALABLE = False
+
+logger = logging.getLogger("google_adk." + __name__)
+
+
+@experimental
+class OAuth2CredentialExchanger(BaseCredentialExchanger):
+ """Exchanges OAuth2 credentials from authorization responses."""
+
+ @override
+ async def exchange(
+ self,
+ auth_credential: AuthCredential,
+ auth_scheme: Optional[AuthScheme] = None,
+ ) -> AuthCredential:
+ """Exchange OAuth2 credential from authorization response.
+ if credential exchange failed, the original credential will be returned.
+
+ Args:
+ auth_credential: The OAuth2 credential to exchange.
+ auth_scheme: The OAuth2 authentication scheme.
+
+ Returns:
+ The exchanged credential with access token.
+
+ Raises:
+ CredentialExchangError: If auth_scheme is missing.
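+
+    Example (illustrative sketch; ``credential`` is assumed to carry an OAuth2
+    auth response and ``oauth2_scheme`` the matching auth scheme):
+
+    ```python
+    exchanger = OAuth2CredentialExchanger()
+    exchanged = await exchanger.exchange(credential, oauth2_scheme)
+    if exchanged.oauth2 and exchanged.oauth2.access_token:
+      # The access token can now be used for authenticated requests.
+      ...
+    ```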
+ """
+ if not auth_scheme:
+ raise CredentialExchangError(
+ "auth_scheme is required for OAuth2 credential exchange"
+ )
+
+ if not AUTHLIB_AVIALABLE:
+ # If authlib is not available, we cannot exchange the credential.
+ # We return the original credential without exchange.
+      # The caller of this tool can decide to exchange the credential
+      # themselves using another library.
+ logger.warning(
+ "authlib is not available, skipping OAuth2 credential exchange."
+ )
+ return auth_credential
+
+ if auth_credential.oauth2 and auth_credential.oauth2.access_token:
+ return auth_credential
+
+ client, token_endpoint = create_oauth2_session(auth_scheme, auth_credential)
+ if not client:
+ logger.warning("Could not create OAuth2 session for token exchange")
+ return auth_credential
+
+ try:
+ tokens = client.fetch_token(
+ token_endpoint,
+ authorization_response=auth_credential.oauth2.auth_response_uri,
+ code=auth_credential.oauth2.auth_code,
+ grant_type=OAuthGrantType.AUTHORIZATION_CODE,
+ )
+ update_credential_with_tokens(auth_credential, tokens)
+ logger.debug("Successfully exchanged OAuth2 tokens")
+ except Exception as e:
+ # TODO reconsider whether we should raise errors in this case
+ logger.error("Failed to exchange OAuth2 tokens: %s", e)
+ # Return original credential on failure
+ return auth_credential
+
+ return auth_credential
diff --git a/src/google/adk/auth/exchanger/service_account_credential_exchanger.py b/src/google/adk/auth/exchanger/service_account_credential_exchanger.py
deleted file mode 100644
index 415081ca5..000000000
--- a/src/google/adk/auth/exchanger/service_account_credential_exchanger.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Credential fetcher for Google Service Account."""
-
-from __future__ import annotations
-
-from typing import Optional
-
-import google.auth
-from google.auth.transport.requests import Request
-from google.oauth2 import service_account
-from typing_extensions import override
-
-from ...utils.feature_decorator import experimental
-from ..auth_credential import AuthCredential
-from ..auth_credential import AuthCredentialTypes
-from ..auth_schemes import AuthScheme
-from .base_credential_exchanger import BaseCredentialExchanger
-
-
-@experimental
-class ServiceAccountCredentialExchanger(BaseCredentialExchanger):
- """Exchanges Google Service Account credentials for an access token.
-
- Uses the default service credential if `use_default_credential = True`.
- Otherwise, uses the service account credential provided in the auth
- credential.
- """
-
- @override
- async def exchange(
- self,
- auth_credential: AuthCredential,
- auth_scheme: Optional[AuthScheme] = None,
- ) -> AuthCredential:
- """Exchanges the service account auth credential for an access token.
-
- If the AuthCredential contains a service account credential, it will be used
- to exchange for an access token. Otherwise, if use_default_credential is True,
- the default application credential will be used for exchanging an access token.
-
- Args:
- auth_scheme: The authentication scheme.
- auth_credential: The credential to exchange.
-
- Returns:
- An AuthCredential in OAUTH2 format, containing the exchanged credential JSON.
-
- Raises:
- ValueError: If service account credentials are missing or invalid.
- Exception: If credential exchange or refresh fails.
- """
- if auth_credential is None:
- raise ValueError("Credential cannot be None.")
-
- if auth_credential.auth_type != AuthCredentialTypes.SERVICE_ACCOUNT:
- raise ValueError("Credential is not a service account credential.")
-
- if auth_credential.service_account is None:
- raise ValueError(
- "Service account credentials are missing. Please provide them."
- )
-
- if (
- auth_credential.service_account.service_account_credential is None
- and not auth_credential.service_account.use_default_credential
- ):
- raise ValueError(
- "Service account credentials are invalid. Please set the"
- " service_account_credential field or set `use_default_credential ="
- " True` to use application default credential in a hosted service"
- " like Google Cloud Run."
- )
-
- try:
- if auth_credential.service_account.use_default_credential:
- credentials, _ = google.auth.default()
- else:
- config = auth_credential.service_account
- credentials = service_account.Credentials.from_service_account_info(
- config.service_account_credential.model_dump(), scopes=config.scopes
- )
-
- # Refresh credentials to ensure we have a valid access token
- credentials.refresh(Request())
-
- return AuthCredential(
- auth_type=AuthCredentialTypes.OAUTH2,
- google_oauth2_json=credentials.to_json(),
- )
- except Exception as e:
- raise ValueError(f"Failed to exchange service account token: {e}") from e
diff --git a/src/google/adk/auth/oauth2_credential_fetcher.py b/src/google/adk/auth/oauth2_credential_fetcher.py
deleted file mode 100644
index c9e838b25..000000000
--- a/src/google/adk/auth/oauth2_credential_fetcher.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import annotations
-
-import logging
-
-from ..utils.feature_decorator import experimental
-from .auth_credential import AuthCredential
-from .auth_schemes import AuthScheme
-from .auth_schemes import OAuthGrantType
-from .oauth2_credential_util import create_oauth2_session
-from .oauth2_credential_util import update_credential_with_tokens
-
-try:
- from authlib.oauth2.rfc6749 import OAuth2Token
-
- AUTHLIB_AVIALABLE = True
-except ImportError:
- AUTHLIB_AVIALABLE = False
-
-
-logger = logging.getLogger("google_adk." + __name__)
-
-
-@experimental
-class OAuth2CredentialFetcher:
- """Exchanges and refreshes an OAuth2 access token. (Experimental)"""
-
- def __init__(
- self,
- auth_scheme: AuthScheme,
- auth_credential: AuthCredential,
- ):
- self._auth_scheme = auth_scheme
- self._auth_credential = auth_credential
-
- def _update_credential(self, tokens: OAuth2Token) -> None:
- self._auth_credential.oauth2.access_token = tokens.get("access_token")
- self._auth_credential.oauth2.refresh_token = tokens.get("refresh_token")
- self._auth_credential.oauth2.expires_at = (
- int(tokens.get("expires_at")) if tokens.get("expires_at") else None
- )
- self._auth_credential.oauth2.expires_in = (
- int(tokens.get("expires_in")) if tokens.get("expires_in") else None
- )
-
- def exchange(self) -> AuthCredential:
- """Exchange an oauth token from the authorization response.
-
- Returns:
- An AuthCredential object containing the access token.
- """
- if not AUTHLIB_AVIALABLE:
- return self._auth_credential
-
- if (
- self._auth_credential.oauth2
- and self._auth_credential.oauth2.access_token
- ):
- return self._auth_credential
-
- client, token_endpoint = create_oauth2_session(
- self._auth_scheme, self._auth_credential
- )
- if not client:
- logger.warning("Could not create OAuth2 session for token exchange")
- return self._auth_credential
-
- try:
- tokens = client.fetch_token(
- token_endpoint,
- authorization_response=self._auth_credential.oauth2.auth_response_uri,
- code=self._auth_credential.oauth2.auth_code,
- grant_type=OAuthGrantType.AUTHORIZATION_CODE,
- )
- update_credential_with_tokens(self._auth_credential, tokens)
- logger.info("Successfully exchanged OAuth2 tokens")
- except Exception as e:
- logger.error("Failed to exchange OAuth2 tokens: %s", e)
- # Return original credential on failure
- return self._auth_credential
-
- return self._auth_credential
-
- def refresh(self) -> AuthCredential:
- """Refresh an oauth token.
-
- Returns:
- An AuthCredential object containing the refreshed access token.
- """
- if not AUTHLIB_AVIALABLE:
- return self._auth_credential
- credential = self._auth_credential
- if not credential.oauth2:
- return credential
-
- if OAuth2Token({
- "expires_at": credential.oauth2.expires_at,
- "expires_in": credential.oauth2.expires_in,
- }).is_expired():
- client, token_endpoint = create_oauth2_session(
- self._auth_scheme, self._auth_credential
- )
- if not client:
- logger.warning("Could not create OAuth2 session for token refresh")
- return credential
-
- try:
- tokens = client.refresh_token(
- url=token_endpoint,
- refresh_token=credential.oauth2.refresh_token,
- )
- update_credential_with_tokens(self._auth_credential, tokens)
- logger.info("Successfully refreshed OAuth2 tokens")
- except Exception as e:
- logger.error("Failed to refresh OAuth2 tokens: %s", e)
- # Return original credential on failure
- return credential
-
- return self._auth_credential
diff --git a/src/google/adk/auth/refresher/oauth2_credential_refresher.py b/src/google/adk/auth/refresher/oauth2_credential_refresher.py
new file mode 100644
index 000000000..4c19520ce
--- /dev/null
+++ b/src/google/adk/auth/refresher/oauth2_credential_refresher.py
@@ -0,0 +1,126 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth2 credential refresher implementation."""
+
+from __future__ import annotations
+
+import json
+import logging
+from typing import Optional
+
+from google.adk.auth.auth_credential import AuthCredential
+from google.adk.auth.auth_schemes import AuthScheme
+from google.adk.auth.oauth2_credential_util import create_oauth2_session
+from google.adk.auth.oauth2_credential_util import update_credential_with_tokens
+from google.adk.utils.feature_decorator import experimental
+from google.auth.transport.requests import Request
+from google.oauth2.credentials import Credentials
+from typing_extensions import override
+
+from .base_credential_refresher import BaseCredentialRefresher
+
+try:
+ from authlib.oauth2.rfc6749 import OAuth2Token
+
+ AUTHLIB_AVIALABLE = True
+except ImportError:
+ AUTHLIB_AVIALABLE = False
+
+logger = logging.getLogger("google_adk." + __name__)
+
+
+@experimental
+class OAuth2CredentialRefresher(BaseCredentialRefresher):
+ """Refreshes OAuth2 credentials including Google OAuth2 JSON credentials."""
+
+ @override
+ async def is_refresh_needed(
+ self,
+ auth_credential: AuthCredential,
+ auth_scheme: Optional[AuthScheme] = None,
+ ) -> bool:
+ """Check if the OAuth2 credential needs to be refreshed.
+
+ Args:
+ auth_credential: The OAuth2 credential to check.
+ auth_scheme: The OAuth2 authentication scheme (optional for Google OAuth2 JSON).
+
+ Returns:
+ True if the credential needs to be refreshed, False otherwise.
+ """
+
+ # Handle regular OAuth2 credentials
+ if auth_credential.oauth2:
+ if not AUTHLIB_AVIALABLE:
+ return False
+
+ return OAuth2Token({
+ "expires_at": auth_credential.oauth2.expires_at,
+ "expires_in": auth_credential.oauth2.expires_in,
+ }).is_expired()
+
+ return False
+
+ @override
+ async def refresh(
+ self,
+ auth_credential: AuthCredential,
+ auth_scheme: Optional[AuthScheme] = None,
+ ) -> AuthCredential:
+ """Refresh the OAuth2 credential.
+ If refresh failed, return the original credential.
+
+ Args:
+ auth_credential: The OAuth2 credential to refresh.
+ auth_scheme: The OAuth2 authentication scheme (optional for Google OAuth2 JSON).
+
+ Returns:
+ The refreshed credential.
+
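+    Example (illustrative sketch; ``credential`` and ``oauth2_scheme`` are
+    assumed to be an OAuth2 credential and its matching auth scheme):
+
+    ```python
+    refresher = OAuth2CredentialRefresher()
+    if await refresher.is_refresh_needed(credential, oauth2_scheme):
+      credential = await refresher.refresh(credential, oauth2_scheme)
+    ```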
+ """
+
+ # Handle regular OAuth2 credentials
+ if auth_credential.oauth2 and auth_scheme:
+ if not AUTHLIB_AVIALABLE:
+ return auth_credential
+
+ if OAuth2Token({
+ "expires_at": auth_credential.oauth2.expires_at,
+ "expires_in": auth_credential.oauth2.expires_in,
+ }).is_expired():
+ client, token_endpoint = create_oauth2_session(
+ auth_scheme, auth_credential
+ )
+ if not client:
+ logger.warning("Could not create OAuth2 session for token refresh")
+ return auth_credential
+
+ try:
+ tokens = client.refresh_token(
+ url=token_endpoint,
+ refresh_token=auth_credential.oauth2.refresh_token,
+ )
+ update_credential_with_tokens(auth_credential, tokens)
+ logger.debug("Successfully refreshed OAuth2 tokens")
+ except Exception as e:
+ # TODO reconsider whether we should raise error when refresh failed.
+ logger.error("Failed to refresh OAuth2 tokens: %s", e)
+ # Return original credential on failure
+ return auth_credential
+
+ return auth_credential
diff --git a/src/google/adk/cli/browser/chunk-EQDQRRRY.js b/src/google/adk/cli/browser/chunk-EQDQRRRY.js
new file mode 100644
index 000000000..134dff1fa
--- /dev/null
+++ b/src/google/adk/cli/browser/chunk-EQDQRRRY.js
@@ -0,0 +1 @@
+var p=Object.create;var j=Object.defineProperty,q=Object.defineProperties,r=Object.getOwnPropertyDescriptor,s=Object.getOwnPropertyDescriptors,t=Object.getOwnPropertyNames,g=Object.getOwnPropertySymbols,u=Object.getPrototypeOf,k=Object.prototype.hasOwnProperty,m=Object.prototype.propertyIsEnumerable;var l=(a,b,c)=>b in a?j(a,b,{enumerable:!0,configurable:!0,writable:!0,value:c}):a[b]=c,w=(a,b)=>{for(var c in b||={})k.call(b,c)&&l(a,c,b[c]);if(g)for(var c of g(b))m.call(b,c)&&l(a,c,b[c]);return a},x=(a,b)=>q(a,s(b));var y=(a,b)=>{var c={};for(var d in a)k.call(a,d)&&b.indexOf(d)<0&&(c[d]=a[d]);if(a!=null&&g)for(var d of g(a))b.indexOf(d)<0&&m.call(a,d)&&(c[d]=a[d]);return c};var z=(a,b)=>()=>(b||a((b={exports:{}}).exports,b),b.exports);var v=(a,b,c,d)=>{if(b&&typeof b=="object"||typeof b=="function")for(let e of t(b))!k.call(a,e)&&e!==c&&j(a,e,{get:()=>b[e],enumerable:!(d=r(b,e))||d.enumerable});return a};var A=(a,b,c)=>(c=a!=null?p(u(a)):{},v(b||!a||!a.__esModule?j(c,"default",{value:a,enumerable:!0}):c,a));var B=(a,b,c)=>new Promise((d,e)=>{var n=f=>{try{h(c.next(f))}catch(i){e(i)}},o=f=>{try{h(c.throw(f))}catch(i){e(i)}},h=f=>f.done?d(f.value):Promise.resolve(f.value).then(n,o);h((c=c.apply(a,b)).next())});export{w as a,x as b,y as c,z as d,A as e,B as f};
diff --git a/src/google/adk/cli/browser/chunk-TXJFAAIW.js b/src/google/adk/cli/browser/chunk-TXJFAAIW.js
new file mode 100644
index 000000000..24066bccc
--- /dev/null
+++ b/src/google/adk/cli/browser/chunk-TXJFAAIW.js
@@ -0,0 +1,2 @@
+import"./chunk-EQDQRRRY.js";var O=function(l,i){if(!(l instanceof i))throw new TypeError("Cannot call a class as a function")},R=function(){function l(i,e){for(var t=0;t1&&arguments[1]!==void 0?arguments[1]:1,e=i>0?l.toFixed(i).replace(/0+$/,"").replace(/\.$/,""):l.toString();return e||"0"}var z=function(){function l(i,e,t,r){O(this,l);var n=this;function o(a){if(a.startsWith("hsl")){var s=a.match(/([\-\d\.e]+)/g).map(Number),p=y(s,4),u=p[0],f=p[1],d=p[2],b=p[3];b===void 0&&(b=1),u/=360,f/=100,d/=100,n.hsla=[u,f,d,b]}else if(a.startsWith("rgb")){var m=a.match(/([\-\d\.e]+)/g).map(Number),h=y(m,4),v=h[0],g=h[1],S=h[2],k=h[3];k===void 0&&(k=1),n.rgba=[v,g,S,k]}else a.startsWith("#")?n.rgba=l.hexToRgb(a):n.rgba=l.nameToRgb(a)||l.hexToRgb(a)}if(i!==void 0)if(Array.isArray(i))this.rgba=i;else if(t===void 0){var c=i&&""+i;c&&o(c.toLowerCase())}else this.rgba=[i,e,t,r===void 0?1:r]}return R(l,[{key:"printRGB",value:function(e){var t=e?this.rgba:this.rgba.slice(0,3),r=t.map(function(n,o){return A(n,o===3?3:0)});return e?"rgba("+r+")":"rgb("+r+")"}},{key:"printHSL",value:function(e){var t=[360,100,100,1],r=["","%","%",""],n=e?this.hsla:this.hsla.slice(0,3),o=n.map(function(c,a){return A(c*t[a],a===3?3:1)+r[a]});return e?"hsla("+o+")":"hsl("+o+")"}},{key:"printHex",value:function(e){var t=this.hex;return e?t:t.substring(0,7)}},{key:"rgba",get:function(){if(this._rgba)return this._rgba;if(!this._hsla)throw new Error("No color is set");return this._rgba=l.hslToRgb(this._hsla)},set:function(e){e.length===3&&(e[3]=1),this._rgba=e,this._hsla=null}},{key:"rgbString",get:function(){return this.printRGB()}},{key:"rgbaString",get:function(){return this.printRGB(!0)}},{key:"hsla",get:function(){if(this._hsla)return this._hsla;if(!this._rgba)throw new Error("No color is set");return this._hsla=l.rgbToHsl(this._rgba)},set:function(e){e.length===3&&(e[3]=1),this._hsla=e,this._rgba=null}},{key:"hslString",get:function(){return this.printHSL()}},{key:"hslaString",get:function(){return this.printHSL(!0)}},{key:"hex",get:function(){var e=this.rgba,t=e.map(function(r,n){return n<3?r.toString(16):Math.round(r*255).toString(16)});return"#"+t.map(function(r){return r.padStart(2,"0")}).join("")},set:function(e){this.rgba=l.hexToRgb(e)}}],[{key:"hexToRgb",value:function(e){var t=(e.startsWith("#")?e.slice(1):e).replace(/^(\w{3})$/,"$1F").replace(/^(\w)(\w)(\w)(\w)$/,"$1$1$2$2$3$3$4$4").replace(/^(\w{6})$/,"$1FF");if(!t.match(/^([0-9a-fA-F]{8})$/))throw new Error("Unknown hex color; "+e);var r=t.match(/^(\w\w)(\w\w)(\w\w)(\w\w)$/).slice(1).map(function(n){return parseInt(n,16)});return r[3]=r[3]/255,r}},{key:"nameToRgb",value:function(e){var t=e.toLowerCase().replace("at","T").replace(/[aeiouyldf]/g,"").replace("ght","L").replace("rk","D").slice(-5,4),r=N[t];return r===void 0?r:l.hexToRgb(r.replace(/\-/g,"00").padStart(6,"f"))}},{key:"rgbToHsl",value:function(e){var t=y(e,4),r=t[0],n=t[1],o=t[2],c=t[3];r/=255,n/=255,o/=255;var a=Math.max(r,n,o),s=Math.min(r,n,o),p=void 0,u=void 0,f=(a+s)/2;if(a===s)p=u=0;else{var d=a-s;switch(u=f>.5?d/(2-a-s):d/(a+s),a){case r:p=(n-o)/d+(n1&&(g-=1),g<.16666666666666666?h+(v-h)*6*g:g<.5?v:g<.6666666666666666?h+(v-h)*(.6666666666666666-g)*6:h},f=o<.5?o*(1+n):o+n-o*n,d=2*o-f;a=u(d,f,r+1/3),s=u(d,f,r),p=u(d,f,r-1/3)}var b=[a*255,s*255,p*255].map(Math.round);return b[3]=c,b}}]),l}(),F=function(){function l(){O(this,l),this._events=[]}return 
R(l,[{key:"add",value:function(e,t,r){e.addEventListener(t,r,!1),this._events.push({target:e,type:t,handler:r})}},{key:"remove",value:function(e,t,r){this._events=this._events.filter(function(n){var o=!0;return e&&e!==n.target&&(o=!1),t&&t!==n.type&&(o=!1),r&&r!==n.handler&&(o=!1),o&&l._doRemove(n.target,n.type,n.handler),!o})}},{key:"destroy",value:function(){this._events.forEach(function(e){return l._doRemove(e.target,e.type,e.handler)}),this._events=[]}}],[{key:"_doRemove",value:function(e,t,r){e.removeEventListener(t,r,!1)}}]),l}();function U(l){var i=document.createElement("div");return i.innerHTML=l,i.firstElementChild}function T(l,i,e){var t=!1;function r(a,s,p){return Math.max(s,Math.min(a,p))}function n(a,s,p){if(p&&(t=!0),!!t){a.preventDefault();var u=i.getBoundingClientRect(),f=u.width,d=u.height,b=s.clientX,m=s.clientY,h=r(b-u.left,0,f),v=r(m-u.top,0,d);e(h/f,v/d)}}function o(a,s){var p=a.buttons===void 0?a.which:a.buttons;p===1?n(a,a,s):t=!1}function c(a,s){a.touches.length===1?n(a,a.touches[0],s):t=!1}l.add(i,"mousedown",function(a){o(a,!0)}),l.add(i,"touchstart",function(a){c(a,!0)}),l.add(window,"mousemove",o),l.add(i,"touchmove",c),l.add(window,"mouseup",function(a){t=!1}),l.add(i,"touchend",function(a){t=!1}),l.add(i,"touchcancel",function(a){t=!1})}var B=`linear-gradient(45deg, lightgrey 25%, transparent 25%, transparent 75%, lightgrey 75%) 0 0 / 2em 2em,
+ linear-gradient(45deg, lightgrey 25%, white 25%, white 75%, lightgrey 75%) 1em 1em / 2em 2em`,G=360,P="keydown",x="mousedown",H="focusin";function _(l,i){return(i||document).querySelector(l)}function M(l){l.preventDefault(),l.stopPropagation()}function D(l,i,e,t,r){l.add(i,P,function(n){e.indexOf(n.key)>=0&&(r&&M(n),t(n))})}var W=function(){function l(i){O(this,l),this.settings={popup:"right",layout:"default",alpha:!0,editor:!0,editorFormat:"hex",cancelButton:!1,defaultColor:"#0cf"},this._events=new F,this.onChange=null,this.onDone=null,this.onOpen=null,this.onClose=null,this.setOptions(i)}return R(l,[{key:"setOptions",value:function(e){var t=this;if(!e)return;var r=this.settings;function n(s,p,u){for(var f in s)u&&u.indexOf(f)>=0||(p[f]=s[f])}if(e instanceof HTMLElement)r.parent=e;else{r.parent&&e.parent&&r.parent!==e.parent&&(this._events.remove(r.parent),this._popupInited=!1),n(e,r),e.onChange&&(this.onChange=e.onChange),e.onDone&&(this.onDone=e.onDone),e.onOpen&&(this.onOpen=e.onOpen),e.onClose&&(this.onClose=e.onClose);var o=e.color||e.colour;o&&this._setColor(o)}var c=r.parent;if(c&&r.popup&&!this._popupInited){var a=function(p){return t.openHandler(p)};this._events.add(c,"click",a),D(this._events,c,[" ","Spacebar","Enter"],a),this._popupInited=!0}else e.parent&&!r.popup&&this.show()}},{key:"openHandler",value:function(e){if(this.show()){e&&e.preventDefault(),this.settings.parent.style.pointerEvents="none";var t=e&&e.type===P?this._domEdit:this.domElement;setTimeout(function(){return t.focus()},100),this.onOpen&&this.onOpen(this.colour)}}},{key:"closeHandler",value:function(e){var t=e&&e.type,r=!1;if(!e)r=!0;else if(t===x||t===H){var n=(this.__containedEvent||0)+100;e.timeStamp>n&&(r=!0)}else M(e),r=!0;r&&this.hide()&&(this.settings.parent.style.pointerEvents="",t!==x&&this.settings.parent.focus(),this.onClose&&this.onClose(this.colour))}},{key:"movePopup",value:function(e,t){this.closeHandler(),this.setOptions(e),t&&this.openHandler()}},{key:"setColor",value:function(e,t){this._setColor(e,{silent:t})}},{key:"_setColor",value:function(e,t){if(typeof e=="string"&&(e=e.trim()),!!e){t=t||{};var r=void 0;try{r=new z(e)}catch(o){if(t.failSilently)return;throw o}if(!this.settings.alpha){var n=r.hsla;n[3]=1,r.hsla=n}this.colour=this.color=r,this._setHSLA(null,null,null,null,t)}}},{key:"setColour",value:function(e,t){this.setColor(e,t)}},{key:"show",value:function(){var e=this.settings.parent;if(!e)return!1;if(this.domElement){var t=this._toggleDOM(!0);return this._setPosition(),t}var r=this.settings.template||'