#!/usr/bin/env python3
# coding: utf-8
# File: chatbot_graph.py
# Author: lixx
# Date: 24-12-23

from question_classifier import *
from question_parser import *
from answer_search import *
from openai import OpenAI

'''问答类'''


class ChatBotGraph:
    """Medical Q&A bot combining a knowledge-graph pipeline with an LLM.

    A question is first run through the KG pipeline (classifier → parser →
    searcher).  When the graph returns answers they are fed to the LLM as
    grounding context for an enhanced reply; otherwise the LLM answers on
    its own, and a canned default reply is the last resort.
    """

    def __init__(self,
                 base_url: str = "http://localhost:1234/v1",
                 api_key: str = "lm-studio",
                 model: str = "model/HuaTuo-7B"):
        """Build the KG components and the OpenAI-compatible LLM client.

        Args:
            base_url: OpenAI-compatible endpoint; defaults to a local
                LMStudio server.
            api_key: API key the endpoint expects (LMStudio accepts any).
            model: Model identifier sent with every completion request
                (defaults to the HuaTuo medical model).
        """
        self.classifier = QuestionClassifier()
        # NOTE: "Paser" is the upstream class's own (misspelled) name.
        self.parser = QuestionPaser()
        self.searcher = AnswerSearcher()
        self.model = model
        # LMStudio exposes an OpenAI-compatible HTTP API.
        self.llm_client = OpenAI(base_url=base_url, api_key=api_key)

    def get_llm_response(self, question: str, context=None):
        """Ask the LLM for an answer; return its text or None on failure.

        Args:
            question: User question, forwarded as the user message.
            context: Optional knowledge-graph text injected as an extra
                system message so the model can ground its answer.

        Returns:
            The model's reply text, or None when the API call fails or
            returns no choices (callers then fall back to other answers).
        """
        messages = [
            {"role": "system", "content": "你是一个专业的医疗助手，请根据用户的问题提供准确、专业的医疗建议。"},
        ]
        if context:
            # Ground the model on facts retrieved from the knowledge graph.
            messages.append({"role": "system", "content": f"参考信息：{context}"})
        messages.append({"role": "user", "content": question})

        # Keep the try body minimal: only the network call can usefully fail.
        try:
            completion = self.llm_client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.7,
            )
        except Exception as e:
            print(f"LLM调用出错: {str(e)}")
            return None
        # Guard against an empty choices list instead of letting the
        # IndexError escape.
        if not completion.choices:
            return None
        return completion.choices[0].message.content

    def chat_main(self, sent: str) -> str:
        """Answer one user question.

        Args:
            sent: The raw user question.

        Returns:
            The LLM-enhanced answer when the knowledge graph has results,
            the raw KG answer if the LLM is unavailable, a plain LLM
            answer when the graph has nothing, or a canned default reply.
        """
        default_answer = '您好，我是小李医药智能助理，希望可以帮到您。祝您身体棒棒！'

        # Knowledge-graph path first.
        res_classify = self.classifier.classify(sent)
        if res_classify:
            res_sql = self.parser.parser_main(res_classify)
            kg_answers = self.searcher.search_main(res_sql)
            if kg_answers:
                kg_context = '\n'.join(kg_answers)
                # Prefer the LLM-enhanced answer; fall back to raw KG text.
                llm_enhanced = self.get_llm_response(sent, context=kg_context)
                return llm_enhanced or kg_context

        # No KG hit: try the LLM alone, then the canned default.
        llm_answer = self.get_llm_response(sent)
        return llm_answer or default_answer


if __name__ == '__main__':
    handler = ChatBotGraph()
    # Interactive Q&A loop; type "exit" (or Ctrl-C / Ctrl-D) to quit.
    while True:
        try:
            question = input("用户(输入exit退出)：")
        except (EOFError, KeyboardInterrupt):
            # End-of-input or Ctrl-C: exit gracefully instead of crashing
            # with an unhandled exception.
            print('感谢您的咨询，期待再次为您服务。')
            break
        # Tolerate surrounding whitespace in the exit command.
        if question.strip() == 'exit':
            print('感谢您的咨询，期待再次为您服务。')
            break
        answer = handler.chat_main(question)
        print('小李：', answer)
