# chgpt/modules/llm.py
import json
from hashlib import sha256

from fastapi import HTTPException
from openai import OpenAI
import tiktoken

from . import log_module, error_map, chat_functions as tools, settings

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    # Imported only for type hints, avoiding a circular import at runtime
    from . import model

# Tokenizer matching the configured model, for token accounting
encoding = tiktoken.encoding_for_model(settings.GPT_MODEL)
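
# A minimal, hedged sketch of how the encoding above is presumably used for the
# token accounting behind chat.tokens; count_tokens is illustrative only and is
# not referenced anywhere else in this module.
def count_tokens(text: str) -> int:
    # encode() turns text into GPT_MODEL token ids; its length is the token count
    return len(encoding.encode(text))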
def ejecutar(chat: "model.Chat", session: "model.Session"):
    """Send the chat history to the OpenAI API and return the streaming response."""
    temp_messages = [msg.model_dump() for msg in chat.messages]
    try:
        client = OpenAI(
            api_key=settings.OPENAI_API_KEY,
            timeout=30.0,
            max_retries=3
        )
        generated = client.chat.completions.create(
            model=settings.GPT_MODEL,
            messages=temp_messages,
            temperature=session.configs.temperature,
            frequency_penalty=session.configs.frequency_penalty,
            presence_penalty=session.configs.presence_penalty,
            tools=tools.functions if session.configs.useTool else None,
            stream=True,
            # Hash the session id so the user identifier sent to OpenAI is opaque
            user=sha256(session.gid.encode('UTF-8')).hexdigest()
        )
        return generated
    except Exception as error:
        log_module.logger(session.gid).error(repr(error) + " - " + session.gid)
        # Map the exception type to HTTPException kwargs, falling back to "undefined"
        raise HTTPException(**error_map.error_table.get(type(error), error_map.error_table["undefined"]))
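
# Hedged sketch of the shape error_map.error_table is expected to have, inferred
# from the lookup above: a mapping from exception types to HTTPException kwargs
# with an "undefined" fallback. The entries below are illustrative assumptions,
# not the project's real table.
#
#     error_table = {
#         openai.RateLimitError:      {"status_code": 429, "detail": "Rate limited upstream"},
#         openai.AuthenticationError: {"status_code": 502, "detail": "Upstream auth failure"},
#         "undefined":                {"status_code": 500, "detail": "Unexpected error"},
#     }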
async def streamer(chat: "model.Chat", session: "model.Session", sub_exec: bool = False):
    """Yield JSON command strings to the client while consuming the model stream."""
    response_async = ejecutar(chat, session)
    if not sub_exec:
        yield json.dumps({"comando": "status", "status": {"mensaje": "Cargando", "modo": "reemplazar"}})  # "Loading"
    # The first chunk carries the role and tells us whether this is a plain
    # message (string content) or a tool call (content is None)
    chunk = next(response_async)
    role = chunk.choices[0].delta.role
    if isinstance(chunk.choices[0].delta.content, str):
        message = chat.new_msg(role, response_async)
        chat.append(message)
    else:
        message = chat.new_func(role, response_async, chunk)
        if not sub_exec:
            yield json.dumps({"comando": "status", "status": {"mensaje": "Buscando en google o algo así", "modo": "reemplazar"}})  # "Searching Google or something like that"
        message.exec(session.gid)
        chat.append(message)
        # Recurse so the model can answer with the tool result appended to the chat
        async for r_async in streamer(chat, session, True):
            yield r_async
    if not sub_exec:
        session.update_usage(chat.tokens)
        log_module.logger(session.gid).info(f"Chat used, tokens: {chat.tokens}")
        yield json.dumps({"comando": "challenge", "challenge": session.challenge})
        yield json.dumps({"comando": "token", "token": session.create_cookie_token()})
        yield json.dumps({"comando": "mensaje", "mensaje": chat.messages[-1].model_dump()})
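
# Hedged usage sketch: one way streamer() could be served from FastAPI with a
# StreamingResponse. The route path, request wiring, and newline-delimited
# framing are assumptions for illustration; none of this is defined here.
#
#     from fastapi import FastAPI
#     from fastapi.responses import StreamingResponse
#
#     app = FastAPI()
#
#     @app.post("/chat")
#     async def chat_endpoint(chat: "model.Chat", session: "model.Session"):
#         async def ndjson():
#             async for payload in streamer(chat, session):
#                 yield payload + "\n"  # one JSON command object per line
#         return StreamingResponse(ndjson(), media_type="application/x-ndjson")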