AI Agent开发实战(一):环境搭建与工具链配置
一、开场:万事开头难
大家好,我是老金。
很多朋友问我:想学AI Agent开发,但不知道从哪开始?
- Python版本选哪个?
- 用什么LLM API?
- 框架选LangChain还是自己写?
- 开发环境怎么配置?
这些问题困扰了很多想入门的朋友。今天开始,我将用10篇文章手把手教你从零开发AI Agent。
这是第一篇:环境搭建与工具链配置。
二、开发环境准备
2.1 Python环境
# Recommended: Python 3.10+ (better typing support)
# Manage Python versions with pyenv

# Install pyenv (macOS/Linux)
curl https://pyenv.run | bash

# Install Python 3.11
pyenv install 3.11.7

# Create the project directory
mkdir ai-agent-tutorial
cd ai-agent-tutorial

# Pin the Python version for this directory
pyenv local 3.11.7

# Verify
python --version
# Python 3.11.7
2.2 虚拟环境
# Create a virtual environment with venv
python -m venv venv

# Activate the virtual environment
# macOS/Linux
source venv/bin/activate

# Windows (cmd/PowerShell) — path separators were missing in the original
venv\Scripts\activate

# Upgrade pip
pip install --upgrade pip
2.3 项目结构
# Create the project skeleton
mkdir -p src/agents src/tools src/memory src/utils tests
# Package markers so `src.*` is importable
touch src/__init__.py
touch src/agents/__init__.py
touch src/tools/__init__.py
touch src/memory/__init__.py
touch src/utils/__init__.py
# Project metadata files
touch requirements.txt
touch .env.example
touch README.md
ai-agent-tutorial/
├── src/
│ ├── agents/ # Agent实现
│ ├── tools/ # 工具定义
│ ├── memory/ # 记忆系统
│ └── utils/ # 工具函数
├── tests/ # 测试文件
├── requirements.txt # 依赖管理
├── .env.example # 环境变量模板
└── README.md # 项目说明
三、核心依赖安装
3.1 requirements.txt
# LLM APIs
openai>=1.0.0
anthropic>=0.18.0
# Frameworks (optional — this tutorial builds from scratch)
# langchain>=0.1.0
# langchain-openai>=0.1.0
# Tool calling / data validation
pydantic>=2.0.0
python-dotenv>=1.0.0
# Vector databases (for agent memory)
chromadb>=0.4.0
faiss-cpu>=1.7.0
# HTTP clients
httpx>=0.25.0
aiohttp>=3.9.0
# Utilities (token counting, templating)
tiktoken>=0.5.0
jinja2>=3.1.0
# Dev tooling
pytest>=7.0.0
pytest-asyncio>=0.21.0
black>=23.0.0
ruff>=0.1.0
# Logging
loguru>=0.7.0
3.2 安装依赖
pip install -r requirements.txt
四、LLM API配置
4.1 环境变量配置
# .env.example
# Copy to .env and fill in your API keys

# OpenAI
OPENAI_API_KEY=sk-xxx
OPENAI_BASE_URL=https://api.openai.com/v1 # optional, e.g. for a proxy
# Anthropic Claude
ANTHROPIC_API_KEY=sk-ant-xxx
# Chinese-hosted models (optional)
# ZHIPU_API_KEY=xxx
# MOONSHOT_API_KEY=xxx
# DEEPSEEK_API_KEY=xxx
4.2 配置加载
# src/utils/config.py
import os
from typing import Optional

from dotenv import load_dotenv
from pydantic import BaseModel

# Pull variables from a local .env file into the process environment.
load_dotenv()


class LLMConfig(BaseModel):
    """Provider credentials and default sampling settings for LLM calls."""

    openai_api_key: Optional[str] = None
    openai_base_url: str = "https://api.openai.com/v1"
    anthropic_api_key: Optional[str] = None
    default_model: str = "gpt-4-turbo-preview"
    max_tokens: int = 4096
    temperature: float = 0.7

    @classmethod
    def from_env(cls) -> "LLMConfig":
        """Build a config from environment variables, falling back to defaults."""
        env = os.getenv
        return cls(
            openai_api_key=env("OPENAI_API_KEY"),
            openai_base_url=env("OPENAI_BASE_URL", "https://api.openai.com/v1"),
            anthropic_api_key=env("ANTHROPIC_API_KEY"),
            default_model=env("DEFAULT_MODEL", "gpt-4-turbo-preview"),
            max_tokens=int(env("MAX_TOKENS", "4096")),
            temperature=float(env("TEMPERATURE", "0.7")),
        )


# Module-level singleton shared across the project.
config = LLMConfig.from_env()
五、LLM客户端封装
5.1 基础客户端
# src/utils/llm_client.py
from openai import AsyncOpenAI
from anthropic import AsyncAnthropic
from typing import Optional, List, Dict, Any, Union
from .config import config
import logging
logger = logging.getLogger(__name__)
class LLMClient:
    """Unified async client over the OpenAI and Anthropic chat APIs.

    The backend is chosen at construction time; ``chat()`` dispatches to
    the matching provider and returns the assistant's reply text.
    """

    def __init__(
        self,
        provider: str = "openai",  # openai | anthropic
        model: Optional[str] = None
    ):
        """Create a client for the given provider.

        Args:
            provider: Either ``"openai"`` or ``"anthropic"``.
            model: Model name; defaults to ``config.default_model``.

        Raises:
            ValueError: If the provider is unknown or its API key is not set.
        """
        self.provider = provider
        self.model = model or config.default_model
        if provider == "openai":
            if not config.openai_api_key:
                raise ValueError("OPENAI_API_KEY not set")
            self.client = AsyncOpenAI(
                api_key=config.openai_api_key,
                base_url=config.openai_base_url
            )
        elif provider == "anthropic":
            if not config.anthropic_api_key:
                raise ValueError("ANTHROPIC_API_KEY not set")
            self.client = AsyncAnthropic(
                api_key=config.anthropic_api_key
            )
        else:
            raise ValueError(f"Unknown provider: {provider}")

    async def chat(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> str:
        """Send a chat request and return the assistant's reply text.

        Args:
            messages: OpenAI-style message dicts (``role``/``content``).
            temperature: Optional override for ``config.temperature``.
            max_tokens: Optional override for ``config.max_tokens``.
        """
        if self.provider == "openai":
            return await self._openai_chat(
                messages, temperature, max_tokens, **kwargs
            )
        # __init__ guarantees provider is one of the two; previously this was
        # an `elif` whose fall-through silently returned None.
        return await self._anthropic_chat(
            messages, temperature, max_tokens, **kwargs
        )

    async def _openai_chat(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float],
        max_tokens: Optional[int],
        **kwargs
    ) -> str:
        """OpenAI chat completion; returns the first choice's content."""
        # `is not None` (not `or`) so an explicit 0 / 0.0 override is honoured.
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=temperature if temperature is not None else config.temperature,
            max_tokens=max_tokens if max_tokens is not None else config.max_tokens,
            **kwargs
        )
        # Lazy %-formatting so the message is only built when INFO is enabled.
        logger.info(
            "OpenAI API called: %s, tokens: %s",
            self.model, response.usage.total_tokens
        )
        return response.choices[0].message.content

    async def _anthropic_chat(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float],
        max_tokens: Optional[int],
        **kwargs
    ) -> str:
        """Anthropic chat completion.

        Converts OpenAI-style messages: a system message (if any) is moved
        to the dedicated ``system`` parameter of the Messages API.
        """
        system = None
        claude_messages = []
        for msg in messages:
            if msg["role"] == "system":
                system = msg["content"]
            else:
                claude_messages.append(msg)
        request: Dict[str, Any] = dict(
            model=self.model,
            max_tokens=max_tokens if max_tokens is not None else config.max_tokens,
            # Bug fix: `temperature` was accepted by this method but never
            # forwarded to the API.
            temperature=temperature if temperature is not None else config.temperature,
            messages=claude_messages,
            **kwargs
        )
        # Omit `system` entirely when absent instead of sending system=None.
        if system is not None:
            request["system"] = system
        response = await self.client.messages.create(**request)
        logger.info(
            "Anthropic API called: %s, tokens: %s",
            self.model,
            response.usage.input_tokens + response.usage.output_tokens
        )
        return response.content[0].text

    async def chat_with_tools(
        self,
        messages: List[Dict[str, str]],
        tools: List[Dict[str, Any]],
        tool_choice: str = "auto"
    ) -> Dict[str, Any]:
        """Chat with tool/function calling (OpenAI only).

        Returns:
            Dict with ``content``, ``tool_calls`` and ``finish_reason`` from
            the first choice.

        Raises:
            NotImplementedError: If the configured provider is not OpenAI.
        """
        if self.provider != "openai":
            raise NotImplementedError("Tool calling only supported for OpenAI")
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            tools=tools,
            tool_choice=tool_choice
        )
        message = response.choices[0].message
        return {
            "content": message.content,
            "tool_calls": message.tool_calls,
            "finish_reason": response.choices[0].finish_reason
        }
5.2 使用示例
# Smoke-test the LLM client
import asyncio
from src.utils.llm_client import LLMClient


async def test_llm():
    # OpenAI
    conversation = [
        {"role": "system", "content": "你是一个友好的助手"},
        {"role": "user", "content": "你好,介绍一下自己"},
    ]
    client = LLMClient(provider="openai", model="gpt-4-turbo-preview")
    reply = await client.chat(conversation)
    print(reply)


if __name__ == "__main__":
    asyncio.run(test_llm())
六、日志系统
6.1 日志配置
# src/utils/logger.py
from loguru import logger
import sys
def setup_logger(log_file: str = "logs/agent.log"):
"""配置日志"""
# 移除默认处理器
logger.remove()
# 控制台输出
logger.add(
sys.stdout,
format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}",
level="INFO"
)
# 文件输出
logger.add(
log_file,
rotation="10 MB",
retention="7 days",
format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {message}",
level="DEBUG"
)

[Note: a span of the original article was lost in extraction here — the end of the logging setup, section 七 (the tiktoken-based TokenCounter class and its `count` signature), and the start of section 八 (testing). The surviving fragment below is the body of `TokenCounter.count`:]

def count(self, text: str) -> int:
"""计算文本token数"""
return len(self.encoding.encode(text))
def count_messages(self, messages: List[Dict[str, str]]) -> int:
    """Return the approximate token count of a chat message list.

    Each message costs a fixed 4-token format overhead plus the encoded
    length of every field value; a final 2-token conversation overhead is
    added once at the end.
    """
    # NOTE(review): indentation was lost in extraction; the trailing +2 is
    # assumed to apply once per conversation (standard chat-format priming)
    # rather than per message — confirm against the original source.
    per_message_overhead = 4
    total = sum(
        per_message_overhead
        + sum(len(self.encoding.encode(value)) for value in message.values())
        for message in messages
    )
    return total + 2
def truncate(self, text: str, max_tokens: int) -> str:
"""截断文本到指定token数"""
tokens = self.encoding.encode(text)
if len(tokens) <= max_tokens:
    return text
return self.encoding.decode(tokens[:max_tokens])

[Note: content lost in extraction — the end of token_counter.py and the beginning of the test file. The surviving fragment below asserts and prints a token count:]

count = counter.count(text)
assert count > 0
print(f"'{text}' 的token数: {count}")
# Allow running this test file directly: invokes pytest on itself in verbose mode.
if __name__ == "__main__":
pytest.main([__file__, "-v"])
8.2 运行测试
# Run the whole test suite
pytest tests/ -v
# Run a single test file
pytest tests/test_llm_client.py -v
# Async tests require pytest-asyncio
pytest tests/ -v --asyncio-mode=auto
九、开发工具配置
9.1 代码格式化
# pyproject.toml — shared tool configuration
[tool.black]
line-length = 88
target-version = ['py311']
# NOTE(review): newer ruff releases expect select/ignore under [tool.ruff.lint];
# confirm against the ruff version pinned in requirements.txt.
[tool.ruff]
line-length = 88
select = ["E", "F", "W", "I"]
ignore = ["E501"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
9.2 VS Code配置
// .vscode/settings.json
// Workspace settings: lint with ruff, format with black on save, test with pytest.
// NOTE(review): the python.linting.* / python.formatting.* keys are the legacy
// VS Code Python-extension settings — confirm they match the installed extension.
{
"python.linting.enabled": true,
"python.linting.ruffEnabled": true,
"python.formatting.provider": "black",
"editor.formatOnSave": true,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": ["tests"]
}
十、总结
环境清单
| 组件 | 版本 | 用途 |
|---|---|---|
| Python | 3.11+ | 运行环境 |
| OpenAI SDK | 1.0+ | LLM调用 |
| Pydantic | 2.0+ | 数据验证 |
| pytest | 7.0+ | 测试框架 |
| loguru | 0.7+ | 日志系统 |
验证清单
- [ ] Python版本正确
- [ ] 虚拟环境激活
- [ ] 依赖安装完成
- [ ] API Key配置正确
- [ ] 测试通过
下期预告
下一篇:实现你的第一个AI Agent——从Hello World开始!
往期回顾
正文完