## Core Agent Architecture
### Base Agent Class
```python
from abc import ABC, abstractmethod
from dataclasses import dataclass
import json
from typing import Any, Callable, Dict, List, Literal, Optional
@dataclass
class Task:
    """A single unit of work produced by agent planning."""

    id: str           # stable identifier, e.g. "task_0"
    description: str  # human-readable statement of what to do
    # Fix: the original annotation `"pending" | "in_progress" | ...` raised
    # TypeError at class-creation time (str | str is not a valid type
    # expression); Literal is the correct way to express this closed set.
    status: Literal["pending", "in_progress", "completed", "failed"]
    result: Optional[Any] = None
    # Fix annotation: the default is None, not a list, so the type is Optional.
    subtasks: Optional[List["Task"]] = None
class Memory:
    """Agent memory with short- and long-term storage."""

    def __init__(self):
        # Rolling window of recent interactions (most recent last).
        self.short_term = []
        # Durable facts and learnings, keyed by name.
        self.long_term = {}
        # Scratch space for the current context.
        self.working_memory = {}

    def add_interaction(self, role: str, content: str):
        """Record an interaction, retaining only the 20 most recent."""
        self.short_term.append({"role": role, "content": content})
        overflow = len(self.short_term) - 20
        if overflow > 0:
            self.short_term = self.short_term[overflow:]

    def store_fact(self, key: str, value: Any):
        """Persist a fact in long-term memory (overwrites existing key)."""
        self.long_term[key] = value

    def get_context(self) -> str:
        """Render the last five interactions plus all long-term facts."""
        lines = ["Recent interactions:"]
        lines.extend(
            f"{entry['role']}: {entry['content']}"
            for entry in self.short_term[-5:]
        )
        if self.long_term:
            lines.append("")
            lines.append("Key facts:")
            lines.extend(f"- {key}: {value}" for key, value in self.long_term.items())
        return "\n".join(lines) + "\n"
class Tool:
    """Tool definition for agents.

    Wraps an async callable with a name, a human-readable description, and a
    parameter schema used for pre-call validation.
    """

    # Fix: `callable` is the builtin predicate function, not a type; the
    # proper annotation for "any callable" is typing.Callable.
    def __init__(self, name: str, description: str, func: Callable, parameters: dict):
        self.name = name                # unique tool identifier
        self.description = description  # shown to the LLM during planning
        self.func = func                # async callable implementing the tool
        self.parameters = parameters    # {param_name: {"required": bool, ...}}

    async def execute(self, **kwargs) -> Any:
        """Execute the tool after validating required parameters.

        Raises:
            ValueError: if a parameter marked ``required`` is missing.
        """
        for param, config in self.parameters.items():
            if config.get("required") and param not in kwargs:
                raise ValueError(f"Missing required parameter: {param}")
        return await self.func(**kwargs)
class Agent:
    """Base autonomous agent.

    Plans a goal into tasks with the LLM, then executes each task either
    directly or through one of its registered tools.
    """

    def __init__(self, name: str, llm_client, tools: List[Tool] = None):
        self.name = name
        # llm_client must expose an async complete(prompt: str) -> str
        # — assumed from usage below; confirm against the caller.
        self.llm = llm_client
        self.tools = {t.name: t for t in (tools or [])}
        self.memory = Memory()
        self.task_queue = []

    async def plan(self, goal: str) -> List[Task]:
        """Ask the LLM to break *goal* into pending Tasks.

        Raises:
            json.JSONDecodeError: if the LLM response is not valid JSON.
        """
        planning_prompt = f"""
Goal: {goal}
Available tools:
{self._tools_description()}
Break this goal into 3-5 specific, actionable tasks.
Return as JSON array with 'description' and optional 'tool' fields.
"""
        response = await self.llm.complete(planning_prompt)
        tasks_data = json.loads(response)
        return [
            Task(id=f"task_{i}", description=t["description"], status="pending")
            for i, t in enumerate(tasks_data)
        ]

    async def execute_task(self, task: Task) -> Any:
        """Execute a single task via a tool or a direct LLM completion.

        Returns the task result; on any error the task is marked "failed"
        and the exception is re-raised for the caller to handle.
        """
        task.status = "in_progress"
        execution_prompt = f"""
Task: {task.description}
Context:
{self.memory.get_context()}
Available tools: {list(self.tools.keys())}
Either:
1. Use a tool by returning: {{"action": "tool", "tool": "name", "params": {{}}}}
2. Complete directly: {{"action": "complete", "result": "..."}}
"""
        try:
            response = await self.llm.complete(execution_prompt)
            action = json.loads(response)
            if action["action"] == "tool":
                tool = self.tools.get(action["tool"])
                if tool is None:
                    raise ValueError(f"Unknown tool: {action['tool']}")
                task.result = await tool.execute(**action.get("params", {}))
            else:
                task.result = action.get("result")
        except Exception:
            # Fix: previously a tool, LLM, or JSON-parse error left the task
            # stuck in "in_progress"; record an accurate terminal status.
            task.status = "failed"
            raise
        task.status = "completed"
        self.memory.add_interaction("assistant", f"Completed: {task.description}")
        return task.result

    async def run(self, goal: str) -> Dict[str, Any]:
        """Execute the full agent workflow: plan, then run each task.

        Returns a summary dict with the goal, the success count, and a
        per-task result/error list. Task failures are captured, not raised.
        """
        self.memory.add_interaction("user", goal)
        tasks = await self.plan(goal)
        results = []
        for task in tasks:
            try:
                result = await self.execute_task(task)
                results.append({"task": task.description, "result": result, "status": "success"})
            except Exception as e:
                results.append({"task": task.description, "error": str(e), "status": "failed"})
        return {
            "goal": goal,
            "tasks_completed": len([r for r in results if r["status"] == "success"]),
            "results": results
        }

    def _tools_description(self) -> str:
        """Generate a one-line-per-tool description for prompts."""
        desc = []
        for name, tool in self.tools.items():
            desc.append(f"- {name}: {tool.description}")
        return "\n".join(desc)
```
### ReAct Pattern Implementation
```python
class ReActAgent(Agent):
    """Agent using the Reasoning + Acting (ReAct) pattern."""

    async def run(self, goal: str, max_iterations: int = 10):
        """Execute a think/act/observe loop.

        Returns the model's final answer when it emits a "finish" action,
        or "Max iterations reached" if no finish occurs within
        *max_iterations* steps.
        """
        self.memory.add_interaction("user", goal)
        for i in range(max_iterations):
            thought_prompt = f"""
Goal: {goal}
Step: {i + 1}
Previous actions and observations:
{self.memory.get_context()}
Think step by step. What should I do next?
Format: Thought: [your reasoning]
"""
            thought = await self.llm.complete(thought_prompt)
            self.memory.add_interaction("assistant", thought)
            action_prompt = f"""
Based on this thought: {thought}
Choose an action:
{self._tools_description()}
Or: {{"action": "finish", "answer": "..."}}
Return JSON with "action" and parameters.
"""
            action_response = await self.llm.complete(action_prompt)
            action = json.loads(action_response)
            if action.get("action") == "finish":
                return action.get("answer")
            tool = self.tools.get(action.get("action"))
            if tool:
                observation = await tool.execute(**action.get("params", {}))
                self.memory.add_interaction("system", f"Observation: {observation}")
            else:
                # Fix: an unrecognized action used to be a silent no-op
                # iteration; record it so the next thought can self-correct.
                self.memory.add_interaction(
                    "system",
                    f"Observation: unknown action {action.get('action')!r}"
                )
        return "Max iterations reached"
```
## Advanced Features
### Self-Reflection

The coroutine below is written as an `Agent` method (note the `self` parameter): add it to the `Agent` class, since it relies on `self.llm` and `self.memory`.
```python
async def reflect_and_improve(self, task_result: dict):
    """Agent self-reflection for improvement.

    Asks the LLM to critique a completed task and stores the critique in
    long-term memory under a timestamped key.

    NOTE(review): written as an Agent method — expects ``self.llm`` and
    ``self.memory`` to exist.
    """
    # Fix: `datetime` was used below but never imported, so this raised
    # NameError at runtime.
    from datetime import datetime

    reflection_prompt = f"""
Review this completed task:
{json.dumps(task_result, indent=2)}
What went well?
What could be improved?
What patterns should I remember?
"""
    reflection = await self.llm.complete(reflection_prompt)
    self.memory.store_fact(
        f"learning_{datetime.now().isoformat()}",
        reflection
    )
```
### Multi-Agent Collaboration
```python
class AgentTeam:
    """Coordinate multiple specialized agents."""

    def __init__(self):
        self.agents = {}         # role -> Agent
        self.coordinator = None  # reserved; never assigned in visible code

    def add_agent(self, role: str, agent: Agent):
        """Register *agent* under *role* (overwrites an existing role)."""
        self.agents[role] = agent

    async def delegate(self, task: str, required_skills: List[str]):
        """Delegate *task* to the first agent advertising a required skill.

        Raises:
            ValueError: if no registered agent has any of the skills.
        """
        candidates = [
            role for role, agent in self.agents.items()
            # Fix: the base Agent class defines no `capabilities` attribute,
            # so this crashed with AttributeError for plain agents; treat a
            # missing declaration as "no skills". NOTE(review): confirm which
            # agent subclass is expected to define `capabilities`.
            if any(
                skill in getattr(agent, "capabilities", ())
                for skill in required_skills
            )
        ]
        if not candidates:
            raise ValueError("No agent with required skills")
        return await self.agents[candidates[0]].run(task)
```
## Best Practices
1. **Memory Management**: Balance context window limits with relevant history
2. **Error Recovery**: Implement retry logic and graceful degradation
3. **Tool Design**: Keep tools atomic and composable
4. **Observability**: Log agent reasoning and decisions
5. **Safety**: Validate outputs before acting on them