Hands-on session exploring the creation of custom agents using tools such as the Agent Development Kit, Vertex AI Agent Builder, and the Gemini and GCP command-line interfaces for rapid prototyping, automation, and experimentation.
December 2025 · University of Manchester GDG
GitHub Repository
# Python Env: https://www.python.org/downloads/
python -m venv .venv
# for macOS/Linux
source .venv/bin/activate
# for Windows
.venv\Scripts\activate
# ADK and the arXiv client (smtplib, used for email later, is part of the standard library)
pip install google-adk arxiv
# Create an API key at https://aistudio.google.com/api-keys
# Create the base project. When prompted:
#   - pick 1) gemini-2.5-flash as the model
#   - pick 1) Google AI Studio as the backend, and provide your key
adk create adk_research_assistant
adk run adk_research_assistant
# Add the model to the generated .env file:
GEMINI_MODEL=gemini-2.5-flash
We will use the arXiv API to gather research papers related to your query.
import os
from datetime import datetime
from typing import Dict, List, Literal, TypedDict
from google.adk.agents.llm_agent import Agent
from google.adk.tools.function_tool import FunctionTool
import arxiv
GEMINI_MODEL = os.getenv("GEMINI_MODEL", "gemini-2.5-flash")
class ArxivPaper(TypedDict):
    title: str
    authors: List[str]
    year: int
    arxiv_id: str
    url: str
    summary: str
    topic: str
def _build_arxiv_query_string(topics: List[str]) -> str:
    """Build an arXiv API query string from a list of topics.

    We keep this simple and AND the topic phrases together.
    """
    cleaned = list(dict.fromkeys(t.strip() for t in topics if t.strip()))
    if not cleaned:
        raise ValueError("At least one topic is required")
    # Example: "(ti:\"graph neural networks\" OR abs:\"graph neural networks\")"
    parts = []
    for t in cleaned:
        phrase = t.replace('"', '\\"')
        parts.append(f'(ti:"{phrase}" OR abs:"{phrase}")')
    return " AND ".join(parts)
def search_arxiv_with_client(
    topics: List[str],
    *,
    max_results: int = 50,
    sort_by: Literal[
        "relevance",
        "lastUpdatedDate",
        "submittedDate",
    ] = "submittedDate",
) -> List[ArxivPaper]:
    """Use the `arxiv` Python client to fetch recent papers.

    This is a low-level utility that the LLM agent can call via tool usage,
    or that you can call directly from Python.
    """
    query = _build_arxiv_query_string(topics)
    sort_criterion_map: Dict[str, arxiv.SortCriterion] = {
        "relevance": arxiv.SortCriterion.Relevance,
        "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
        "submittedDate": arxiv.SortCriterion.SubmittedDate,
    }
    search = arxiv.Search(
        query=query,
        max_results=max_results,
        sort_by=sort_criterion_map[sort_by],
    )
    client = arxiv.Client()
    papers: List[ArxivPaper] = []
    for result in client.results(search):
        year = result.published.year if result.published else datetime.utcnow().year
        papers.append(
            ArxivPaper(
                title=result.title,
                authors=[a.name for a in result.authors],
                year=year,
                arxiv_id=result.get_short_id(),
                url=result.entry_id,
                summary=result.summary,
                topic=", ".join(topics),
            )
        )
    return papers
search_arxiv_with_client_tool = FunctionTool(func=search_arxiv_with_client)
arxiv_research_agent = Agent(
    model=GEMINI_MODEL,
    name="arxiv_research_agent",
    description=(
        "Sub-agent that can call a custom arXiv search tool "
        "to find recent papers for given research topics."
    ),
    instruction=(
        "Role: academic research assistant specialized in arXiv.\n"
        "Capabilities: access to search_arxiv_with_client(topics, max_results, sort_by) returning structured paper metadata.\n"
        "Workflow:\n"
        "1. Normalize the supplied topics and decide whether to call the tool once or multiple times (e.g., one per topic cluster).\n"
        "2. Choose max_results (default 50, adjust when many topics) and an appropriate sort order (use submittedDate unless directed otherwise), then call the tool.\n"
        "3. Merge the results, de-duplicate by arXiv ID, and organize them by topic or theme.\n"
        "Output: Provide each paper's title, authors, year, arXiv ID, URL, and a 1–2 sentence summary. Conclude with key takeaways or gaps."
    ),
    tools=[search_arxiv_with_client_tool],
    output_key="arxiv_research_result",
)
pip install arxiv
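To sanity-check the search helper before wiring it into the pipeline, you can call it directly from Python. This is a minimal sketch; it assumes the agent code above is saved as arxiv_agent.py (the name the pipeline imports later), and the topic is only an example.
# Quick local test of the arXiv helper (example topic; adjust as needed)
from arxiv_agent import search_arxiv_with_client

papers = search_arxiv_with_client(["graph neural networks"], max_results=5)
for paper in papers:
    print(paper["year"], paper["arxiv_id"], paper["title"])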
We will use MailerSend to send emails with the research report.
Create an account on mailersend.com and generate SMTP credentials.
import os
from dataclasses import dataclass
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from google.adk.agents.llm_agent import Agent
from google.adk.tools.function_tool import FunctionTool
GEMINI_MODEL = os.getenv("GEMINI_MODEL", "gemini-2.5-flash")
SMTP_DEFAULT_PORT = int(os.getenv("SMTP_DEFAULT_PORT", 587))
SMTP_HOST = os.getenv("SMTP_HOST")
SMTP_USERNAME = os.getenv("SMTP_USERNAME", "")
SMTP_PASSWORD = os.getenv("SMTP_PASSWORD", "")
FROM_ADDR = os.getenv("FROM_ADDR", "")
TO_ADDR = os.getenv("TO_ADDR", "")
@dataclass
class EmailConfig:
    smtp_host: str
    smtp_port: int
    username: str
    password: str
    use_tls: bool = True
def _send_html_email(
    config: EmailConfig,
    *,
    subject: str,
    html_body: str,
) -> None:
    """Low-level helper to send an HTML email via SMTP."""
    msg = MIMEMultipart("alternative")
    msg["Subject"] = subject
    msg["From"] = FROM_ADDR
    msg["To"] = TO_ADDR
    msg.attach(MIMEText(html_body, "html"))
    with smtplib.SMTP(config.smtp_host, config.smtp_port) as server:
        if config.use_tls:
            server.starttls()
        if config.username and config.password:
            server.login(config.username, config.password)
        server.sendmail(FROM_ADDR, [TO_ADDR], msg.as_string())
def send_research_email(
    *,
    subject: str,
    html_body: str,
) -> str:
    """Sends an HTML email with the given subject and body."""
    if not SMTP_HOST:
        return (
            "Preview only; email not sent.\n"
            f"From: {FROM_ADDR}\n"
            f"To: {TO_ADDR}\n"
            f"Subject: {subject}\n"
            f"HTML Body:\n{html_body}"
        )
    config = EmailConfig(
        smtp_host=SMTP_HOST,
        smtp_port=SMTP_DEFAULT_PORT,
        username=SMTP_USERNAME,
        password=SMTP_PASSWORD,
    )
    _send_html_email(
        config,
        subject=subject,
        html_body=html_body,
    )
    return "Email sent successfully."
send_research_email_tool = FunctionTool(func=send_research_email)
email_agent = Agent(
    model=GEMINI_MODEL,
    name="email_body_generator",
    description="Generates clean, readable HTML emails from instructions.",
    instruction=(
        "You are an assistant that writes professional, accessible HTML "
        "emails. Given a set of instructions, produce a complete HTML "
        "document suitable for the email body. Use semantic tags (h1, h2, "
        "p, ul/li) and inline styles only where necessary. Do not include "
        "external CSS or scripts. "
        "Create a beautiful, stylized HTML email body with a frame, header, "
        "and footer relevant to the topic of the content. "
        "Do not include the <html> or <body> tags, only the inner content. "
        "Use the send_research_email tool to send the email."
    ),
    tools=[send_research_email_tool],
)
# No extra package is needed for email: smtplib and email.mime are part of the Python standard library.
Once you have made an account, update your .env file to contain:
SMTP_DEFAULT_PORT=587
SMTP_HOST="smtp.mailersend.net"
SMTP_USERNAME=""
SMTP_PASSWORD=""
FROM_ADDR="test@DOMAIN_GENERATED"
TO_ADDR="YOUR_EMAIL"
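Before running the whole pipeline, you can smoke-test the email tool on its own. This is a minimal sketch; it assumes the code above is saved as email_agent.py (the name the pipeline imports later). When SMTP_HOST is not set in the environment, the tool returns a preview string instead of sending.
# Quick test of the email tool; prints a preview if SMTP_HOST is unset, otherwise sends
from email_agent import send_research_email

result = send_research_email(
    subject="ADK test",
    html_body="<h1>Hello</h1><p>Test email from the ADK workshop.</p>",
)
print(result)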
This is where we combine all the agents to create a seamless research assistant workflow.
We will use the Google Search and arXiv agents to gather information, then merge and email the results.
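One possible project layout, inferred from the imports used below; only the .env file and the root_agent export are fixed by ADK conventions, the other file names are our own choice:
adk_research_assistant/
├── __init__.py       # exposes the root agent to `adk run`
├── agent.py          # the pipeline code below (defines root_agent)
├── arxiv_agent.py    # arxiv_research_agent and its search tool
├── email_agent.py    # email_agent and the send_research_email tool
└── .env              # GEMINI_MODEL, SMTP_* and API key settings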
import os
from google.adk.agents.sequential_agent import SequentialAgent
from google.adk.agents.parallel_agent import ParallelAgent
from google.adk.agents.llm_agent import LlmAgent
from google.adk.tools.google_search_tool import google_search
from arxiv_agent import arxiv_research_agent
from email_agent import email_agent
GEMINI_MODEL = os.getenv("GEMINI_MODEL", "gemini-2.5-flash")
google_agent = LlmAgent(
    name="GoogleSearchResearchAgent",
    model=GEMINI_MODEL,
    instruction="""You are an AI research assistant specializing in finding the latest research using web search.
1. Invoke the Google Search tool with a focused query before drafting your answer.
2. Ground every statement in the retrieved results and synthesize them into one concise (1–2 sentence) summary.
3. Output only that summary; do not include citations or extra commentary.
""",
    description="Researches using Google.",
    tools=[google_search],
    # Store result in state for the merger agent
    output_key="google_research_result",
)
parallel_research_agent = ParallelAgent(
    name="ParallelWebResearchAgent",
    sub_agents=[google_agent, arxiv_research_agent],
    description="Runs multiple research agents in parallel to gather information.",
)
merger_agent = LlmAgent(
    name="MergeSynthesisAgent",
    model=GEMINI_MODEL,
    instruction="""You are an AI assistant merging research outputs into a structured report.
Follow this workflow:
1. Read the Google and arXiv summaries below; do not use external knowledge.
2. Identify the main topics or themes present and organize them into clear headings.
3. Under each heading, integrate the relevant findings, attributing them to their source (Google or arXiv) in-line.
4. Conclude with a brief overall insight section synthesizing cross-source takeaways.
5. Provide a suggested email subject line on the final line in the format: "Suggested Subject: ..."
Input Summaries:
- Google Search Results:
{google_research_result}
- arXiv Papers:
{arxiv_research_result}
Output Requirements:
- Use Markdown headings (##) for each topic.
- Write concise paragraphs or bullet points under each heading.
- Ensure all content is grounded exclusively in the input summaries.
- End with the required subject line and nothing else.
""",
    description="Combines research findings from parallel agents into a structured, cited report, strictly grounded on provided inputs.",
)
sequential_pipeline_agent = SequentialAgent(
    name="ResearchAndSynthesisPipeline",
    sub_agents=[parallel_research_agent, merger_agent, email_agent],
    description="Coordinates parallel research and synthesizes the results.",
)

root_agent = sequential_pipeline_agent
adk run .
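For example, once the agents start you can type a request such as "Research recent advances in retrieval-augmented generation and email me a summary": the parallel agents gather sources, the merger drafts the report and subject line, and the email agent delivers the HTML email to TO_ADDR.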
+-----------------------------+
|           (START)           |
|  sequential_pipeline_agent  |
+--------------+--------------+
               |
               v
+---------------------------------+
| Step 1: parallel_research_agent |
+----------------+----------------+
                 |
       +---------+----------+
       |                    |
       v                    v
+-----------------+  +----------------------+
|  google_agent   |  | arxiv_research_agent |
+--------+--------+  +----------+-----------+
         |                      |
         v                      v
 [ Google Search ]        [ arXiv API ]
         |                      |
"google_research_result"  "arxiv_research_result"
         |                      |
         +----------+-----------+
                    |
                    v
+-----------------------------+
|    Step 2: merger_agent     |
+-----------------------------+
               |
               | (Synthesizes results)
               | "Synthesized Report & Subject"
               v
+-----------------------------+
|     Step 3: email_agent     |
+-----------------------------+
               |
               | (Generates & sends HTML email)
               v
+-----------------------------+
|       [ SMTP Server ]       |
+-----------------------------+
               |
               v
+-----------------------------+
|     (END) User's Inbox      |
+-----------------------------+
Use the Gemini CLI to generate new sub-agents, architecture diagrams, better prompts, tests, and new ideas.
npm install -g @google/gemini-cli
# or, with Homebrew:
brew install gemini-cli
gemini
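For example, launch it from inside the project so it can read the agent files, then experiment with prompts; the prompts below are only illustrations.
# Run the CLI from the agent directory
cd adk_research_assistant
gemini
# Example prompts to try (illustrative):
#   "Read arxiv_agent.py and draft a similar sub-agent that searches another paper source"
#   "Write pytest tests for _build_arxiv_query_string in arxiv_agent.py"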