FastAPI Integration

AgenticFleet provides a FastAPI integration for building scalable agent-powered web APIs.

Setup

Install the required dependencies:

pip install "agentic-fleet[fastapi]"

Basic Integration

Create a FastAPI app with AgenticFleet:

from fastapi import FastAPI
from agentic_fleet import Agent
from agentic_fleet.integrations.fastapi import AgentRouter

# Create the FastAPI application instance
app = FastAPI()

# Create an agent backed by the "gpt-4" model
agent = Agent(
    name="assistant",
    model="gpt-4"
)

# Wrap the agent in a router that exposes its endpoints
router = AgentRouter(agent)

# Mount the agent's routes on the app
app.include_router(router)

Endpoints

Chat Endpoint

from fastapi import FastAPI
from agentic_fleet.integrations.fastapi import ChatEndpoint

app = FastAPI()

# Add a ready-made chat endpoint at /chat
chat = ChatEndpoint(
    path="/chat",
    agent_name="assistant"  # presumably resolves to an agent registered elsewhere — confirm
)
app.include_router(chat.router)

Custom Endpoints

from fastapi import APIRouter
from agentic_fleet.integrations.fastapi import AgentEndpoint  # NOTE(review): imported but never used below — confirm or drop

router = APIRouter()

@router.post("/analyze")
async def analyze_text(text: str):
    """Custom route that runs the agent's analysis over *text*."""
    # assumes a module-level `agent` (from the setup snippet) is in scope
    result = await agent.analyze(text)
    return {"analysis": result}

# Register the custom router on the app
app.include_router(router)

WebSocket Support

from fastapi import WebSocket, WebSocketDisconnect
from agentic_fleet.integrations.fastapi import WSManager

# Create WebSocket manager
ws_manager = WSManager()

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Relay messages between one WebSocket client and the agent.

    Each received text frame is processed by the agent and the
    response is sent back to the same client, until the client
    disconnects.
    """
    await ws_manager.connect(websocket)
    try:
        while True:
            data = await websocket.receive_text()
            response = await agent.process(data)
            await ws_manager.send_personal_message(response, websocket)
    except WebSocketDisconnect:
        # Catch only the normal-disconnect signal, then deregister the
        # socket. A broad `except Exception` here would silently swallow
        # real errors in agent.process or the manager.
        await ws_manager.disconnect(websocket)

Middleware

from agentic_fleet.integrations.fastapi import (
    RateLimitMiddleware,
    AuthMiddleware
)

# Add rate limiting — presumably 100 requests per 60-second window;
# confirm the units against the RateLimitMiddleware docs.
app.add_middleware(
    RateLimitMiddleware,
    limit=100,
    window=60
)

# Add authentication
# NOTE(review): the exact auth flow behind auth_url depends on
# AuthMiddleware — verify before relying on it.
app.add_middleware(
    AuthMiddleware,
    auth_url="/auth"
)

Request Validation

from pydantic import BaseModel

class ChatRequest(BaseModel):
    """Validated request payload for the chat endpoint."""
    message: str  # the user's chat message (required)
    context: dict = {}  # optional extra context; Pydantic copies field defaults, so this is safe to share
    
@router.post("/chat")
async def chat(request: ChatRequest):
    """Forward the validated chat payload to the agent and return its reply."""
    reply = await agent.chat(message=request.message, context=request.context)
    return reply

Background Tasks

from fastapi import BackgroundTasks

@router.post("/process")
async def process(data: dict, background_tasks: BackgroundTasks):
    """Queue *data* for asynchronous agent processing and reply immediately."""
    # FastAPI runs the scheduled task after the response has been sent.
    background_tasks.add_task(agent.process_async, data)
    return {"status": "processing"}

Error Handling

from fastapi import HTTPException
from agentic_fleet.exceptions import AgentError

@router.post("/analyze")
async def analyze(text: str):
    """Analyze *text* with the agent.

    Returns:
        dict: ``{"result": <agent analysis>}`` on success.

    Raises:
        HTTPException: 400 with the agent's error message when the
            agent fails on the input.
    """
    # Keep the try body minimal: only the call that can raise AgentError.
    try:
        result = await agent.analyze(text)
    except AgentError as e:
        # Chain the cause so the original traceback survives in logs.
        raise HTTPException(status_code=400, detail=str(e)) from e
    return {"result": result}

API Documentation

# Configure OpenAPI metadata shown in the generated docs (/docs, /redoc)
app = FastAPI(
    title="AgenticFleet API",
    description="Agent-powered API endpoints",
    version="1.0.0"
)

# Group the agent's routes under the "agent" tag in the docs
# NOTE(review): this snippet re-creates `app` and `router`; in a real
# application configure the original instances rather than shadowing them.
router = AgentRouter(
    agent,
    tags=["agent"]
)

Best Practices

  1. Use proper error handling
  2. Implement rate limiting
  3. Add authentication
  4. Validate requests
  5. Document endpoints
  6. Monitor performance
  7. Handle background tasks
  8. Use WebSockets for real-time communication
  9. Follow FastAPI conventions