WIP: moving to services, refining matrix_service

This commit is contained in:
2025-05-03 01:56:55 +01:00
parent a22ec0a7da
commit fb578fbf40
9 changed files with 233 additions and 142 deletions

12
ai_service/Dockerfile Normal file
View File

@@ -0,0 +1,12 @@
# Minimal runtime image for the AI handler service (FastAPI app in main.py).
FROM python:3.11-slim
WORKDIR /app
# Copy the dependency manifest first so the pip layer is cached
# across code-only changes.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY main.py ./
# Serve the FastAPI app on all interfaces, port 8000 (see docker-compose wiring).
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

45
ai_service/main.py Normal file
View File

@@ -0,0 +1,45 @@
import os
from dotenv import load_dotenv
from fastapi import FastAPI, Header, HTTPException
from pydantic import BaseModel
import openai
import redis

# Load variables from a local .env file (no-op if the file is absent) BEFORE
# the os.environ reads below. The original imported load_dotenv but never
# called it, so a .env file was silently ignored.
load_dotenv()

# Shared secret the caller must present as "Authorization: Bearer <token>".
AI_TOKEN = os.environ["AI_HANDLER_TOKEN"]
openai.api_key = os.environ["OPENAI_API_KEY"]
# Redis client used for event-id idempotency caching; the default URL matches
# the docker-compose service name "redis".
r = redis.Redis.from_url(os.environ.get("REDIS_URL", "redis://redis:6379"))
class MessagePayload(BaseModel):
    """Inbound chat message forwarded to the AI handler.

    Field names are the JSON wire contract with the sender — do not rename.
    """
    roomId: str     # room the message was posted in (presumably a Matrix room id — confirm with caller)
    userId: str     # sender's user id, interpolated into the prompt
    content: str    # plain-text message body
    eventId: str    # unique event id — used as the Redis idempotency/cache key
    timestamp: int  # NOTE(review): unit (seconds vs ms) not evident here; unused by the handler
app = FastAPI()
@app.post("/api/v1/message")
async def message(
    payload: MessagePayload,
    authorization: str = Header(None)
):
    """Generate (or replay) an AI reply for one inbound chat message.

    Auth: requires "Authorization: Bearer <AI_HANDLER_TOKEN>"; anything else
    (including a missing header) gets a 401.
    Idempotency: the reply is cached in Redis under the message's eventId for
    one hour, so redelivered events return the original reply instead of
    calling the model again.

    Returns: {"reply": <text>}.
    Raises: HTTPException(401) on a bad or missing token.
    """
    import hmac  # stdlib; local import keeps the module import block untouched

    # Constant-time comparison avoids leaking token prefixes via timing.
    if authorization is None or not hmac.compare_digest(
        authorization, f"Bearer {AI_TOKEN}"
    ):
        raise HTTPException(status_code=401, detail="Unauthorized")

    # Idempotency: fetch ONCE. The original issued two GETs — a wasted round
    # trip, and if the key expired between them the second returned None and
    # .decode() raised AttributeError (500 to the caller).
    cached = r.get(payload.eventId)
    if cached:
        return {"reply": cached.decode()}

    # Build prompt (very simple example).
    prompt = f"User {payload.userId} said: {payload.content}\nBot:"
    # NOTE(review): blocking call inside an async handler — it stalls the
    # event loop under load. Consider run_in_executor or an async client;
    # left synchronous here to keep behavior unchanged.
    resp = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        max_tokens=150
    )
    reply = resp.choices[0].text.strip()

    # Cache the reply for one hour so redelivery of eventId is idempotent.
    r.set(payload.eventId, reply, ex=3600)
    return {"reply": reply}

View File

@@ -0,0 +1,6 @@
python-dotenv>=1.0.0
openai>=0.27,<1.0  # main.py uses the legacy Completion API, removed in openai 1.x
fastapi>=0.95
uvicorn>=0.22
redis>=4.5
pydantic>=1.10