#!/usr/bin/env python3
# coding: utf-8
# File: chatbot_graph.py
# Author: lihc
# Date: 24-12-23

from question_classifier import *
from question_parser import *
from answer_search import *
from llm_qa import chat_input, get_response, get_stream_response
import json
import logging

# Logging configuration: INFO and above, written both to chatbot.log and to the console.
logging.basicConfig(
    level=logging.INFO,  # keep only INFO-level logs and above
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('chatbot.log'),  # persist logs to a file in the working directory
        logging.StreamHandler()  # also echo to the console (stderr by default)
    ]
)
logger = logging.getLogger(__name__)  # module-level logger used throughout this file

'''问答类'''

class ChatBotGraph:
    """Medical Q&A bot: knowledge-graph lookup feeding an LLM.

    Pipeline: classify the question -> translate the classification into graph
    queries -> search the knowledge graph -> hand the retrieved facts (or the
    raw question as a fallback) to the LLM for the final answer.
    """

    # System prompt shared by every LLM call.
    _SYSTEM = "你是一个专业的医疗助手，请根据用户的问题提供准确、专业的医疗建议。"
    # Canned reply used whenever the pipeline or the LLM call fails.
    _DEFAULT_ANSWER = '您好，我是小李医药智能助理，希望可以帮到您。祝您身体棒棒！'

    def __init__(self):
        self.classifier = QuestionClassifier()
        self.parser = QuestionPaser()
        self.searcher = AnswerSearcher()

    def chat_main(self, sent, stream=False):
        """Answer the user question *sent*.

        BUG FIX: the original implementation mixed ``yield`` with
        ``return response`` in a single function body.  The presence of
        ``yield`` made the whole function a generator unconditionally, so a
        ``stream=False`` caller got a generator object instead of a string
        and the returned answer was silently lost in ``StopIteration``.
        Now the two modes are dispatched to separate helpers:

        - ``stream=True``  -> returns a generator yielding answer fragments;
        - ``stream=False`` -> returns the complete answer string.
        """
        prompt = self._build_prompt(sent)
        if stream:
            return self._llm_stream(prompt)
        return self._llm_once(prompt)

    def _build_prompt(self, sent):
        """Build the LLM prompt from knowledge-graph results.

        Returns the KG-enriched prompt on success, the raw question when the
        graph pipeline produces nothing usable (classify/parse failure), or
        ``None`` on an unexpected internal error (signals "use the canned
        default answer").
        """
        try:
            logger.info("开始查询知识图谱...")
            res_classify = self.classifier.classify(sent)
            if not res_classify:
                logger.info("知识图谱分类失败，直接使用大模型回答")
                return sent
            logger.info(f"知识图谱分类结果: {res_classify}")
            res_sql = self.parser.parser_main(res_classify)
            if not res_sql:
                logger.info("知识图谱解析失败，直接使用大模型回答")
                return sent
            logger.info(f"知识图谱SQL: {res_sql}")
            kg_answers = self.searcher.search_main(res_sql)
            prompt = "你是一个专业的医疗助手，请根据以下知识图谱信息回答用户的问题：\n\n"
            if kg_answers:
                prompt += "知识图谱信息：\n" + "\n".join(kg_answers) + "\n\n"
            prompt += f"用户问题：{sent}\n\n请基于知识图谱信息，给出专业、准确的回答。"
            logger.info(f"构建的提示词: {prompt}")
            return prompt
        except Exception as e:
            logger.error(f"处理问题时出错: {str(e)}")
            return None

    def _llm_once(self, prompt):
        """Blocking LLM call; returns the full answer string (default on failure)."""
        if prompt is None:
            return self._DEFAULT_ANSWER
        try:
            input_data = chat_input(text=prompt, system=self._SYSTEM, history="")
            return get_response(input_data)
        except Exception as e:
            logger.error(f"调用大模型时出错: {str(e)}")
            return self._DEFAULT_ANSWER

    def _llm_stream(self, prompt):
        """Generator: yields answer fragments from the streaming LLM endpoint.

        On any failure a single default-answer fragment is yielded so the
        consumer always prints something.
        """
        if prompt is None:
            yield self._DEFAULT_ANSWER
            return
        logger.info("开始流式调用大模型...")
        try:
            input_data = chat_input(text=prompt, system=self._SYSTEM, history="")
            for chunk in get_stream_response(input_data):
                # Defensive: skip malformed or empty chunks from the stream.
                if chunk and 'choices' in chunk and len(chunk['choices']) > 0:
                    content = chunk['choices'][0].get('delta', {}).get('content', '')
                    if content:
                        yield content
        except Exception as e:
            logger.error(f"流式调用大模型时出错: {str(e)}")
            yield self._DEFAULT_ANSWER


if __name__ == '__main__':
    bot = ChatBotGraph()
    # Interactive console loop: stream each answer fragment as it arrives;
    # the user types "exit" to quit.
    while True:
        question = input("用户(输入exit退出)：")
        if question == 'exit':
            print('感谢您的咨询，期待再次为您服务。')
            break
        print('小李：', end='', flush=True)
        for piece in bot.chat_main(question, stream=True):
            print(piece, end='', flush=True)
        print()  # finish the answer line
