from langchain_openai import ChatOpenAI
from core.config import settings
from langchain_community.embeddings import DashScopeEmbeddings

def get_default_llm():
    """Build the default chat LLM from application settings.

    Returns a `ChatOpenAI` instance configured with the model, API key,
    base URL, temperature and streaming flag taken from `settings`.
    """
    return ChatOpenAI(
        # `model` is the current parameter name; `model_name` is a deprecated alias.
        model=settings.LLM_MODEL,
        temperature=settings.LLM_TEMPERATURE,
        api_key=settings.LLM_API_KEY,
        base_url=settings.LLM_BASE_URL,
        streaming=settings.LLM_STREAMING,
        # `request_timeout` is a deprecated alias of `timeout` in langchain_openai;
        # the original passed both with the same value — pass only `timeout`.
        timeout=60,
        max_retries=2,  # keep retries low to fail fast
        max_tokens=4000,  # large enough to generate a complete exam paper
    )

def get_embedding_llm():
    """Build the embedding client from application settings.

    Returns a `DashScopeEmbeddings` instance using the configured
    embedding model and the shared LLM API key.
    """
    embedding_kwargs = {
        "model": settings.EMBEDDING_MODEL,
        "dashscope_api_key": settings.LLM_API_KEY,
        "max_retries": 2,  # keep retries low to fail fast
    }
    return DashScopeEmbeddings(**embedding_kwargs)
    
    
