import json
from langchain_ollama import ChatOllama
from src.common.logger import getLogger
from src.modules.memory.service import HistoryRecordService, MemoryDetailService

# Module-level logger shared by everything in this module.
logger = getLogger()

def generate_response(args):
    """Run a single chat completion against a local Ollama model.

    Persists the incoming request as history, invokes the LLM with the
    caller's query, stores the AI reply as a memory detail, and returns it.

    Args:
        args: Request payload dict; expected to contain a "query" key with
            the user's prompt text. The full dict is passed through to the
            history/memory services unchanged.
            # NOTE(review): a missing "query" yields None and is sent to
            # the model as-is — confirm upstream guarantees it is present.

    Returns:
        On success: {"chain_result": <model reply text>}.
        On failure: a JSON string '{"error": "<message>"}' — the string
        (not dict) error shape is kept for backward compatibility with
        existing callers.
    """
    query = args.get("query")

    # Record the incoming request first so the history entry survives
    # even if the LLM call below fails.
    HistoryRecordService.insert_history_memory(args)

    try:
        # NOTE(review): endpoint and model are hard-coded — consider
        # moving them to configuration. (A commented-out ChatOpenAI
        # block containing a leaked API key was removed here; rotate
        # that key if it is still active.)
        chat_llm = ChatOllama(
            base_url="http://localhost:11434",
            model="qwen3:4b",
            temperature=0.3,
        )
        logger.info(f"generate_response chat_llm: {chat_llm}")

        # Synchronous (non-streaming) call to the model.
        response = chat_llm.invoke(query)
        logger.info(f"generate_response response content len: {len(response.content)}")

        # Persist the AI reply alongside the original request payload.
        MemoryDetailService.insert_memory_detail_ai(args, { "chain_result": response.content })

        return { "chain_result": response.content }
    except Exception as e:
        # logger.exception records the full traceback, not just the
        # message; lazy %-args avoid formatting when the level is off.
        logger.exception("generate_response error: %s", e)
        return json.dumps({'error': str(e)})
