WIP: tuning
@@ -32,12 +32,15 @@ async def message(
     # Build prompt (very simple example)
     prompt = f"User {payload.userId} said: {payload.content}\nBot:"
-    resp = openai.Completion.create(
-        model="text-davinci-003",
-        prompt=prompt,
-        max_tokens=150
+    chat_response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "-"},
+            {"role": "user", "content": prompt}
+        ],
+        temperature=0.7,
     )
-    reply = resp.choices[0].text.strip()
+    reply = chat_response.choices[0].message.content.strip()

     # Cache reply for idempotency
     r.set(payload.eventId, reply, ex=3600)

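Side note: the requirements change below bumps openai to >=1.0.0, but the hunk above still calls the pre-1.0 module-level openai.ChatCompletion interface, which the 1.x SDK removed. A minimal sketch of the equivalent call against the 1.x client follows; generate_reply is a hypothetical helper (not part of this commit) and it assumes OPENAI_API_KEY is set in the environment.

    from openai import OpenAI  # 1.x-style client, matching openai>=1.0.0 below

    client = OpenAI()  # picks up OPENAI_API_KEY from the environment

    def generate_reply(prompt: str) -> str:
        # Same model, messages and temperature as the hunk above, via the 1.x API
        chat_response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "-"},
                {"role": "user", "content": prompt},
            ],
            temperature=0.7,
        )
        return chat_response.choices[0].message.content.strip()
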
@@ -1,5 +1,5 @@
 python-dotenv>=1.0.0
-openai
+openai>=1.0.0
 fastapi>=0.95
 uvicorn>=0.22
 redis>=4.5

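Since the move to the chat API rides on this dependency bump, it may be worth pinning upper bounds so a future major release of any of these packages cannot silently break the handler. The bounds below are illustrative assumptions, not part of the commit:

    python-dotenv>=1.0.0,<2
    openai>=1.0.0,<2
    fastapi>=0.95,<1
    uvicorn>=0.22,<1
    redis>=4.5,<6
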
@@ -86,9 +86,14 @@ async def main() -> None:
     }
     headers = {"Authorization": f"Bearer {AI_HANDLER_TOKEN}"}
     async with httpx.AsyncClient() as http:
-        resp = await http.post(f"{AI_HANDLER_URL}/api/v1/message", json=payload, headers=headers)
-        resp.raise_for_status()
-        data = resp.json()
+        try:
+            resp = await http.post(f"{AI_HANDLER_URL}/api/v1/message", json=payload, headers=headers)
+            resp.raise_for_status()
+            data = resp.json()
+        except httpx.HTTPStatusError as e:
+            logger.error(f"HTTP error: {e.response.status_code} - {e.response.text}")
+            return
+        except Exception:
+            logger.exception("Error while calling AI handler")
+            return
     if data.get("reply"):
         client.send_message(event["room_id"], data["reply"])

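The error handling above logs the failure and bails out of main(). A sketch of an alternative that also adds a request timeout and a simple retry on transient network errors is shown here; post_with_retry, the attempt count and the back-off values are assumptions for illustration, not part of the commit.

    import asyncio
    import logging

    import httpx

    logger = logging.getLogger(__name__)

    async def post_with_retry(url: str, payload: dict, headers: dict, attempts: int = 3) -> dict | None:
        # POST with an explicit timeout, retrying only transport-level failures.
        async with httpx.AsyncClient(timeout=10.0) as http:
            for attempt in range(1, attempts + 1):
                try:
                    resp = await http.post(url, json=payload, headers=headers)
                    resp.raise_for_status()
                    return resp.json()
                except httpx.HTTPStatusError as e:
                    # The handler answered with 4xx/5xx; retrying rarely helps, so log and stop.
                    logger.error("HTTP error: %s - %s", e.response.status_code, e.response.text)
                    return None
                except httpx.RequestError:
                    # DNS, connect or timeout problems; back off and try again.
                    logger.exception("Request to AI handler failed (attempt %d of %d)", attempt, attempts)
                    await asyncio.sleep(2 ** attempt)
        return None

The caller would then do data = await post_with_retry(f"{AI_HANDLER_URL}/api/v1/message", payload, headers) and guard the send with if data and data.get("reply").
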