"""
配置文件，用于存储API和模型的配置信息
支持Hugging Face模型和GGUF格式模型
"""
import os
import logging
from dotenv import load_dotenv

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 加载环境变量
load_dotenv()
logger.info("已加载环境变量")

# --- typed environment-variable helpers ----------------------------------

def _env_int(name: str, default: str) -> int:
    """Read env var *name* as an int (string *default* used when unset).

    Raises ValueError naming the offending variable, instead of the bare
    int() traceback the inline form produced.
    """
    raw = os.getenv(name, default)
    try:
        return int(raw)
    except ValueError as err:
        raise ValueError(f"Environment variable {name} must be an integer, got {raw!r}") from err


def _env_float(name: str, default: str) -> float:
    """Read env var *name* as a float (string *default* used when unset)."""
    raw = os.getenv(name, default)
    try:
        return float(raw)
    except ValueError as err:
        raise ValueError(f"Environment variable {name} must be a number, got {raw!r}") from err


def _env_bool(name: str, default: str = "False") -> bool:
    """Read env var *name* as a bool; only (case-insensitive) "true" is True."""
    return os.getenv(name, default).lower() == "true"


# API server configuration
API_HOST = os.getenv("API_HOST", "0.0.0.0")
API_PORT = _env_int("API_PORT", "8000")
API_DEBUG = _env_bool("API_DEBUG", "False")

# Model configuration
MODEL_PATH = os.getenv("MODEL_PATH", "facebook/opt-1.3b")  # default to a smaller model
MODEL_DEVICE = os.getenv("MODEL_DEVICE", "cuda")  # default device: CUDA
MODEL_PRECISION = os.getenv("MODEL_PRECISION", "fp16")  # default: half precision
MODEL_MAX_LENGTH = _env_int("MODEL_MAX_LENGTH", "512")
MODEL_MAX_NEW_TOKENS = _env_int("MODEL_MAX_NEW_TOKENS", "256")
MODEL_TEMPERATURE = _env_float("MODEL_TEMPERATURE", "0.7")
MODEL_TOP_P = _env_float("MODEL_TOP_P", "0.9")
MODEL_TOP_K = _env_int("MODEL_TOP_K", "50")

# GGUF-specific configuration (llama.cpp-style settings)
GGUF_N_CTX = _env_int("GGUF_N_CTX", "2048")  # context window size
GGUF_N_BATCH = _env_int("GGUF_N_BATCH", "512")  # batch size
GGUF_N_GPU_LAYERS = _env_int("GGUF_N_GPU_LAYERS", "-1")  # layers on GPU; -1 = all

# ROCm/HIP GPU selection: the HIP runtime reads HIP_VISIBLE_DEVICES, so
# mirror our ROCM_VISIBLE_DEVICES setting into the process environment.
# NOTE(review): presumably this must run before torch / llama.cpp
# initialize the GPU — confirm import order in the application.
os.environ["HIP_VISIBLE_DEVICES"] = ROCM_VISIBLE_DEVICES = os.getenv("ROCM_VISIBLE_DEVICES", "0")

# Echo the effective configuration at import time for easier debugging.
logger.info(f"模型路径: {MODEL_PATH}")
logger.info(f"设备: {MODEL_DEVICE}")
logger.info(f"ROCm可见设备: {ROCM_VISIBLE_DEVICES}")
