'''
* This is the project for the Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: conversation_service.py
* @Time: 2025/10/9
* All Rights Reserved by Brtc
'''
import logging
from dataclasses import dataclass
from injector import inject
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from internal.entity.conversation_entity import SUMMARIZER_TEMPLATE, CONVERSATION_NAME_TEMPLATE, ConversationInfo, \
    SUGGESTED_QUESTION_TEMPLATE, SuggestedQuestion
from pkg.sqlachemy.sqlalchemy import SQLAlchemy
from .base_service import BaseService



@inject
@dataclass
class ConversationService(BaseService):
    """Conversation service.

    Provides LLM-backed helpers for a chat session: rolling summary
    generation, conversation-name inference, and suggested follow-up
    questions. All helpers are classmethods and hold no per-instance state.
    """
    # Injected database handle (unused by the classmethods below, but part
    # of the service's dataclass interface).
    db: SQLAlchemy

    @classmethod
    def summary(cls, human_message: str, ai_message: str, old_summary: str = "") -> str:
        """Produce a new summary from the latest human/AI exchange.

        :param human_message: the user's latest message
        :param ai_message: the assistant's latest reply
        :param old_summary: the previous running summary ("" if none)
        :return: the updated summary text
        """
        # 1. Build the prompt from the shared summarizer template.
        prompt = ChatPromptTemplate.from_template(SUMMARIZER_TEMPLATE)
        # 2. Build the LLM with a low temperature to reduce hallucination.
        # NOTE(review): the sibling methods use temperature=0; 0.5 is kept
        # here to preserve existing behavior — confirm which is intended.
        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.5)
        # 3. Compose prompt -> model -> plain-string parser into a chain.
        summary_chain = prompt | llm | StrOutputParser()
        # 4. Invoke the chain and return the refreshed summary.
        return summary_chain.invoke({
            "summary": old_summary,
            "new_lines": f"Human:{human_message}\nAI:{ai_message}",
        })

    @classmethod
    def generate_conversation_name(cls, query: str) -> str:
        """Infer a short conversation name from the user's query.

        The model is asked (via structured output) for a subject in the
        same language as the user. Falls back to a default name when the
        model yields nothing usable.

        :param query: the user's opening query
        :return: a conversation name, truncated to at most 75 characters
        """
        # 1. Build a system+human prompt from the shared naming template.
        prompt = ChatPromptTemplate.from_messages([
            ("system", CONVERSATION_NAME_TEMPLATE),
            ("human", "{query}")
        ])

        # 2. Zero-temperature LLM constrained to the ConversationInfo schema.
        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
        structured_llm = llm.with_structured_output(ConversationInfo)

        # 3. Compose the chain.
        chain = prompt | structured_llm

        # 4. Trim overlong queries: keep the head and tail so the model
        # still sees both ends of the text.
        if len(query) > 2000:
            query = query[:300] + "...[TRUNCATED]..." + query[-300:]
        query = query.replace("\n", " ")

        # 5. Invoke the chain to obtain the structured conversation info.
        conversation_info = chain.invoke({"query": query})

        # 6. Extract the subject defensively: the structured field may be
        # missing or None, in which case the default name is kept (the
        # original code could assign None here and crash on len() below).
        name = "新的会话"
        try:
            subject = getattr(conversation_info, "subject", None) if conversation_info else None
            if subject:
                name = subject
        except Exception as e:
            # Lazy %-style args: no formatting cost unless the record is emitted.
            logging.exception(
                "提取会话名称出错, conversation_info:%s, 错误信息:%s", conversation_info, e
            )

        # Cap the name length for storage/display.
        if len(name) > 75:
            name = name[:75]
        return name

    @classmethod
    def generated_suggested_questions(cls, histories: str) -> list[str]:
        """Generate up to 3 suggested follow-up questions from chat history.

        :param histories: serialized conversation history text
        :return: a list of at most 3 suggested questions (may be empty)
        """
        # 1. Build a system+human prompt from the shared suggestion template.
        prompt = ChatPromptTemplate.from_messages([
            ("system", SUGGESTED_QUESTION_TEMPLATE),
            ("human", "{histories}")
        ])

        # 2. Zero-temperature LLM constrained to the SuggestedQuestion schema.
        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
        structured_llm = llm.with_structured_output(SuggestedQuestion)

        # 3. Compose the chain.
        chain = prompt | structured_llm

        # 4. Invoke the chain to obtain the structured suggestions.
        suggested_questions = chain.invoke({"histories": histories})

        # 5. Extract the question list defensively: the field may be missing
        # or None (the original code could assign None and crash on len()).
        questions: list[str] = []
        try:
            extracted = getattr(suggested_questions, "questions", None) if suggested_questions else None
            if extracted:
                questions = extracted
        except Exception as e:
            # Lazy %-style args: no formatting cost unless the record is emitted.
            logging.exception(
                "生成建议问题出错, suggested_questions:%s, 错误信息:%s", suggested_questions, e
            )

        # Enforce the at-most-3 contract.
        if len(questions) > 3:
            questions = questions[:3]

        return questions














