
import random
from channels.generic.websocket import WebsocketConsumer
from channels.exceptions import StopConsumer
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from .models import ServiceHost
from .utils import get_server_status
import json
from tools.faissdb import myfaiss
from tools.myredis import r
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Tongyi

# Memory instances used to store multi-turn conversation history
from langchain.memory.chat_message_histories import ChatMessageHistory
from langchain.memory.buffer import ConversationBufferMemory

memory = ConversationBufferMemory(return_messages=True)
history = ChatMessageHistory()


class ChatConsumer(WebsocketConsumer):
    """Customer-service chat over WebSocket.

    Each user is paired with a support agent ("kefu") via a sticky redis
    mapping, joined into a per-pair channel-layer group, and answered by a
    RAG pipeline (FAISS retrieval + Tongyi LLM).

    NOTE(review): the dialogue history lives in the single module-level
    ``memory`` object, so concurrent users share (and pollute) one buffer —
    confirm whether per-user memory was intended.
    """

    def connect(self):
        """Accept the socket, join the user/kefu room and announce arrival."""
        self.user_id = self.scope['url_route']['kwargs']['user_id']
        self.kefu_id = self.assign_kefu(self.user_id)
        self.room_name = f"user_{self.user_id}_kefu_{self.kefu_id}"

        async_to_sync(self.channel_layer.group_add)(
            self.room_name,
            self.channel_name
        )
        self.accept()

        # Broadcast a system notice so the agent side sees the user online.
        async_to_sync(self.channel_layer.group_send)(
            self.room_name,
            {
                'type': 'system_message',
                'message': f"用户 {self.user_id} 已连接。",
                'user_id': self.user_id
            }
        )

    def disconnect(self, close_code):
        """Leave the room.

        Guarded with getattr: if connect() raised before ``room_name`` was
        assigned, the original code crashed here with AttributeError.
        """
        room = getattr(self, 'room_name', None)
        if room is not None:
            async_to_sync(self.channel_layer.group_discard)(
                room,
                self.channel_name
            )

    def receive(self, text_data):
        """Handle one inbound frame: run the RAG pipeline, broadcast the answer.

        Malformed frames are reported back to the client instead of raising
        and killing the consumer, matching the error-handling style of
        ServerMonitorConsumer.receive.
        """
        try:
            data = json.loads(text_data)
        except json.JSONDecodeError:
            self.send(text_data=json.dumps({"error": "Invalid JSON"}))
            return

        question = data.get('message')
        if question is None:
            self.send(text_data=json.dumps({"error": "Missing 'message' field"}))
            return

        # Run retrieval-augmented generation for the question.
        ai_response = self.process_rag_query(self.user_id, question)

        async_to_sync(self.channel_layer.group_send)(
            self.room_name,
            {
                'type': 'chat_message',
                'message': ai_response,
                'user_id': self.user_id
            }
        )

    def chat_message(self, event):
        """Group handler: forward a chat message to this socket."""
        self.send(text_data=json.dumps({
            'user_id': event['user_id'],
            'message': event['message']
        }))

    def system_message(self, event):
        """Group handler: forward a system notice to this socket."""
        self.send(text_data=json.dumps({
            'system_message': event['message']
        }))

    def assign_kefu(self, user_id):
        """Return the agent id for *user_id*, assigning one if none exists.

        Sticky assignments live in the redis hash "user_to_kefu"; new users
        are spread round-robin over the hard-coded agent pool.
        """
        kefu_pool = ['kefu_1', 'kefu_2']
        assigned_kefu = r.hash_getone("user_to_kefu", user_id)
        if assigned_kefu:
            # Redis returns bytes; decode back to the stored agent id.
            return assigned_kefu.decode()

        # Round-robin based on how many users are already mapped.
        assigned_kefu = kefu_pool[len(r.hash_getall("user_to_kefu")) % len(kefu_pool)]
        r.hash_add("user_to_kefu", user_id, assigned_kefu)
        return assigned_kefu

    def process_rag_query(self, user_id, question):
        """Answer *question* with RAG: FAISS retrieval + Tongyi generation.

        Returns the generated answer string and records the turn in the
        module-level ``memory``.  *user_id* is currently unused.
        """
        global memory

        # Find up to 3 stored questions similar to the user's question.
        ask_results = myfaiss.search(question, "ask", 3)
        similar_questions = [res.page_content for res in ask_results]

        # Pull up to 3 candidate answers per similar question.  The loop
        # variable is named `doc` (the original `r` shadowed the module-level
        # redis client inside the comprehension).
        all_answers = []
        for sq in similar_questions:
            docs = myfaiss.search(sq, "movies", 3)
            all_answers.extend([doc.page_content for doc in docs])

        # Ask the LLM to compose a final answer from the retrieved material.
        llm = Tongyi()
        input_message = f"用户问题：{question}\n相似问题：{similar_questions}\n答案：{all_answers}"
        template = """
           以下是用户的问题和检索到的答案，请基于检索内容生成最终的回答：
           用户问题：{input}
           """
        prompt = PromptTemplate(input_variables=["input"], template=template)
        chain = LLMChain(llm=llm, prompt=prompt, memory=memory)

        ai_response = chain.run(input=input_message)

        # Record this turn, then compress the buffer once it holds more than
        # three turns (6 messages).
        memory.chat_memory.add_user_message(question)
        memory.chat_memory.add_ai_message(ai_response)

        if len(memory.chat_memory.messages) > 6:
            self._compress_memory(llm, question, ai_response)

        return ai_response

    def _compress_memory(self, llm, question, ai_response):
        """Summarise the last 6 messages into one Q/A pair and rebuild memory.

        On any failure the buffer is reset to just the current turn.
        """
        global memory

        summary_prompt = """
               请总结以下对话为一个简单的问答对。
               格式要求：
               用户问题：<用户的问题>
               AI回答：<AI的回答>

               对话历史：
               {mes}
               """
        prompt_template = PromptTemplate.from_template(summary_prompt)
        summary_result = llm.invoke(prompt_template.format(mes=str(memory.chat_memory.messages[-6:])))

        try:
            # Start a fresh buffer and re-seed it below.
            memory = ConversationBufferMemory(return_messages=True)

            # Parse the "用户问题：…" / "AI回答：…" lines out of the summary.
            user_question = None
            ai_answer = None
            for line in summary_result.split('\n'):
                line = line.strip()
                if line.startswith('用户问题：'):
                    user_question = line.replace('用户问题：', '').strip()
                elif line.startswith('AI回答：'):
                    ai_answer = line.replace('AI回答：', '').strip()
                    break

            # Seed the new buffer with the summary pair when parsing worked.
            if user_question and ai_answer:
                memory.chat_memory.add_user_message(user_question)
                memory.chat_memory.add_ai_message(ai_answer)

            # Always keep the current turn.
            memory.chat_memory.add_user_message(question)
            memory.chat_memory.add_ai_message(ai_response)

        except Exception as e:
            print(f"处理总结时出错: {str(e)}")
            # On failure, reset and keep only the current turn.
            memory = ConversationBufferMemory(return_messages=True)
            memory.chat_memory.add_user_message(question)
            memory.chat_memory.add_ai_message(ai_response)



class ServerMonitorConsumer(WebsocketConsumer):
    """Broadcasts server status (CPU usage, simulated temperature) to the
    "monitor" group whenever a client sends a request frame."""

    def connect(self):
        """Join the shared monitor group and accept the socket."""
        self.group_name = "monitor"
        async_to_sync(self.channel_layer.group_add)(self.group_name, self.channel_name)
        self.accept()
        print("WebSocket 连接成功")

    def disconnect(self, close_code):
        """Leave the monitor group."""
        async_to_sync(self.channel_layer.group_discard)(self.group_name, self.channel_name)

    def receive(self, text_data):
        """Handle a client request: probe every host, then broadcast results."""
        print("Received data:", text_data)  # confirm the client payload arrived

        # Guard clause: reject frames that are not valid JSON.
        try:
            data = json.loads(text_data)
        except json.JSONDecodeError as e:
            print(f"JSON decode error: {e}")
            self.send(text_data=json.dumps({"error": "Invalid JSON"}))
            return
        print("Parsed data:", data)

        # Probe every registered server.
        hosts = ServiceHost.objects.all()
        print(f"Found {len(hosts)} hosts")
        results = [self._probe_host(host) for host in hosts]

        # Broadcast the collected status records to the whole group.
        async_to_sync(self.channel_layer.group_send)(
            self.group_name,
            {'type': 'send_status', 'data': results},
        )

    def _probe_host(self, host):
        """Build one status record (CPU, simulated temperature, warning) for *host*."""
        print(f"Checking host: {host.name}")

        # Remote CPU usage, fetched over SSH.
        cpu_usage = get_server_status(
            hostname=host.ip,
            port=host.port,
            username=host.username,
            password=host.password,
            command="top -bn1 | grep 'Cpu(s)' | awk '{print $2}'",
        )
        print(f"CPU usage for {host.name}: {cpu_usage}")

        # Temperature is simulated with a random value.
        temp = round(random.uniform(30.0, 80.0), 2)
        print(f"Simulated Temperature for {host.name}: {temp}")

        # Flag anything hotter than 60°C.
        warning = ""
        if temp > 60.0:
            warning = f"警告：服务器 {host.name} 温度过高（{temp}°C）！"
            print(warning)

        return {
            "id": host.id,
            "name": host.name,
            "cpu_usage": cpu_usage,
            "temperature": temp,
            "warning": warning,
        }

    def send_status(self, event):
        """Group handler: push the status payload down this socket."""
        self.send(text_data=json.dumps(event['data']))