# ch4rly-api/app/config.py
import os

import torch
from dotenv import load_dotenv

# Load environment variables from a local .env file.
load_dotenv()

# 🔑 Hugging Face access token and model name
HF_ENV_TOKEN_HUGGING_FACE = os.getenv("HF_TOKEN")
HF_ENV_MODEL_NAME = os.getenv("HF_MODEL_NAME")
# 📝 Chat generation parameters
CHAT_PARAM_MAX_LENGTH = 256   # cap on total sequence length (prompt + output)
CHAT_MAX_NEW_TOKENS = 512     # cap on newly generated tokens per response
CHAT_PARAM_DO_SAMPLE = True   # sample from the distribution instead of greedy decoding
CHAT_PARAM_TEMPERATURE = 0.5  # <1.0 sharpens the distribution toward likely tokens
CHAT_PARAM_TOP_P = 0.8        # nucleus sampling: keep the smallest token set covering 80% probability
CHAT_PARAM_TOP_K = 50         # consider only the 50 most likely tokens at each step
CHAT_LENGTH_PENALTY = 0.8     # <1.0 favors shorter outputs (only takes effect with beam search)
CHAT_REPETITION_PENALTY = 1.2 # >1.0 discourages repeating tokens
CHAT_DEVICE_MAP = "cuda" if torch.cuda.is_available() else "cpu"  # use the GPU when available
CHAT_TORCH_DTYPE = "auto"     # let transformers choose the model dtype
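
# --- Usage sketch (illustrative, not part of the original file). A minimal
# example of how the settings above might be consumed, assuming the standard
# transformers Auto* API; the helper name `load_chat_model` is hypothetical.
def load_chat_model():
    """Load the chat model and tokenizer using the env vars and device settings above."""
    # Imported here so this config module stays dependency-light.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        HF_ENV_MODEL_NAME, token=HF_ENV_TOKEN_HUGGING_FACE
    )
    model = AutoModelForCausalLM.from_pretrained(
        HF_ENV_MODEL_NAME,
        token=HF_ENV_TOKEN_HUGGING_FACE,  # required for gated or private models
        device_map=CHAT_DEVICE_MAP,       # "cuda" when a GPU is available, else "cpu"
        torch_dtype=CHAT_TORCH_DTYPE,     # "auto" lets transformers pick the dtype
    )
    return model, tokenizer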
# ⚙️ Default generation kwargs (greedy decoding)
KWARGS_DO_SAMPLE = False        # greedy decoding; the sampling knobs below are ignored
KWARGS_TEMPERATURE = 0.7        # only takes effect if do_sample is switched to True
KWARGS_TOP_P = 0.9              # only takes effect if do_sample is switched to True
KWARGS_TOP_K = 50               # only takes effect if do_sample is switched to True
KWARGS_MAX_LENGTH = 512         # cap on total sequence length (prompt + output)
KWARGS_REPETITION_PENALTY = 1.2 # >1.0 discourages repeating tokens
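
# --- Generation sketch (illustrative; `chat_generate` is a hypothetical helper).
# Shows how the CHAT_* parameters would map onto `model.generate`, with the
# model and tokenizer coming from the `load_chat_model` sketch above.
def chat_generate(model, tokenizer, prompt: str) -> str:
    """Generate a chat completion for `prompt` using the CHAT_* parameters."""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(
        **inputs,
        max_new_tokens=CHAT_MAX_NEW_TOKENS,
        do_sample=CHAT_PARAM_DO_SAMPLE,
        temperature=CHAT_PARAM_TEMPERATURE,
        top_p=CHAT_PARAM_TOP_P,
        top_k=CHAT_PARAM_TOP_K,
        repetition_penalty=CHAT_REPETITION_PENALTY,
        length_penalty=CHAT_LENGTH_PENALTY,  # only takes effect with beam search
    )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)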