from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from interview_voice_project.common import config_ini, get_logger
import asyncio
logger = get_logger(__name__)


# ============ LLM configuration ============
# Shared ChatOpenAI client pointed at a DeepSeek-compatible endpoint.
# Credentials, base URL and model name come from the project config file.
my_llm = ChatOpenAI(
    api_key=config_ini.LLM.DEEPSEEK_API_KEY,
    base_url=config_ini.LLM.DEEPSEEK_BASE_URL,
    model=config_ini.LLM.DEEPSEEK_MODEL,
    timeout=60,     # per-request timeout in seconds
    max_retries=3   # automatic retries on transient API failures
)

# Cap on simultaneous in-flight LLM calls; defaults to 3 when unset.
# NOTE(review): `.strip()` assumes the config value is a string — if the
# config loader ever returns an int this raises AttributeError at import;
# also a value of "0" or less would make the semaphore block forever.
# Verify against the config loader's return type.
# NOTE(review): the limit is read from the VOICE_MODEL section but gates
# LLM calls — confirm this is intentional and not a copy-paste of the key.
_llm_concurrency = int((config_ini.VOICE_MODEL.MAX_CONCURRENCY or "3").strip())
# asyncio.Semaphore binds to the running loop lazily (Python 3.10+), so
# creating it at import time, outside any event loop, is safe.
_llm_semaphore = asyncio.Semaphore(_llm_concurrency)

async def llm_stream_messages(messages):
    """Stream the LLM's response for *messages*, chunk by chunk.

    Concurrency is bounded by the module-level semaphore, which is held
    for the full duration of the stream.

    Args:
        messages: LangChain message objects (e.g. ``HumanMessage``).

    Yields:
        Response chunks as produced by the model.
    """
    async with _llm_semaphore:
        # Bug fix: the original iterated the synchronous my_llm.stream()
        # inside an async def, blocking the event loop on every chunk
        # (up to the client's 60s timeout). astream() is the async
        # counterpart and yields control to the loop between chunks.
        async for chunk in my_llm.astream(messages):
            yield chunk

async def llm_invoke_messages(messages):
    """Invoke the LLM once for *messages* and return the full response.

    Concurrency is bounded by the module-level semaphore.

    Args:
        messages: LangChain message objects (e.g. ``HumanMessage``).

    Returns:
        The model's complete response message.
    """
    async with _llm_semaphore:
        # Bug fix: the original called the synchronous my_llm.invoke()
        # inside an async def, blocking the event loop for the whole
        # request. ainvoke() awaits the call without blocking the loop.
        return await my_llm.ainvoke(messages)

if __name__ == '__main__':
    # Smoke test: send a single prompt and print the model's reply.
    demo_messages = [HumanMessage(content="用一句话介绍一下你自己")]
    reply = my_llm.invoke(demo_messages)
    print(reply.content)
