Files
botbot/ai_service/main.py
2025-05-04 15:39:57 +01:00

67 lines
1.7 KiB
Python

import logging
import os
import secrets

import redis
from dotenv import load_dotenv
from fastapi import FastAPI, Header, HTTPException
from openai import OpenAI
from pydantic import BaseModel
# --- Environment & Clients ---
# Load a local .env file (if present) BEFORE reading os.environ.
# BUG FIX: load_dotenv was imported but never called, so values defined only
# in .env were invisible and the os.environ[...] lookups below could fail.
load_dotenv()

LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
# Shared secret the upstream bot must present; fail fast at startup if unset.
AI_TOKEN = os.environ["AI_HANDLER_TOKEN"]

AIclient = OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
)

# Redis connection used to cache replies per eventId (idempotency).
r = redis.Redis.from_url(os.environ.get("REDIS_URL", "redis://redis:6379"))

# --- Logging Setup ---
# Fall back to INFO if LOG_LEVEL names no real logging level.
numeric_level = getattr(logging, LOG_LEVEL, logging.INFO)
logging.basicConfig(
    level=numeric_level,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)
class MessagePayload(BaseModel):
    """Inbound chat message payload posted to /api/v1/message."""

    roomId: str  # room the message came from (not read by this handler)
    userId: str  # sender id; interpolated into the model prompt
    eventId: str  # unique event id; used as the Redis idempotency key
    serverTimestamp: int  # server-side event timestamp (not read by this handler)
    content: str  # raw message text forwarded to the model
app = FastAPI()


@app.post("/api/v1/message")
async def message(
    payload: MessagePayload,
    authorization: str = Header(None),
):
    """Generate (or replay) an assistant reply for one chat message.

    Requires an ``Authorization: Bearer <AI_HANDLER_TOKEN>`` header.
    Replies are cached in Redis keyed by ``eventId`` for one hour, so a
    redelivered event returns the original reply without re-calling the model.

    Returns ``{"reply": <text>}``; raises HTTP 401 on a bad/missing token.
    """
    # Constant-time comparison so the shared secret cannot be probed
    # byte-by-byte via response timing. Guard None: Header(None) makes the
    # header optional, and compare_digest rejects non-str operands.
    expected = f"Bearer {AI_TOKEN}"
    if authorization is None or not secrets.compare_digest(authorization, expected):
        raise HTTPException(status_code=401, detail="Unauthorized")

    # Idempotency: single GET. (Original called r.get twice — an extra round
    # trip, and the key could expire between check and read, crashing .decode()
    # on None.)
    cached = r.get(payload.eventId)
    if cached is not None:
        return {"reply": cached.decode()}

    # Build prompt (very simple example)
    prompt = f"User {payload.userId} said: {payload.content}\nBot:"

    # NOTE(review): this OpenAI call and the Redis calls are synchronous inside
    # an async handler, so they block the event loop under load — consider the
    # async OpenAI client / redis.asyncio, or a plain `def` endpoint (FastAPI
    # would then run it in a threadpool). Left unchanged here.
    chat_response = AIclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=150,
        n=1,
        stop=None,
        temperature=0.7,
    )
    reply = chat_response.choices[0].message.content.strip()

    # Cache reply for idempotency (1 hour TTL).
    r.set(payload.eventId, reply, ex=3600)
    return {"reply": reply}