'''
LLM Task Client
This service receives LLM tasks (recognized speech text) and sends the
generated replies back to the caller. It can run in environments without a
public IP by connecting to the task-dispatch server over WebSocket.

terrence@tenclass.com
2024-09
'''

import os
import sys
import json
import uuid
import time
import queue
import asyncio
import logging
import re
import threading
import websocket
from typing import Dict, Any
from collections import deque
from util import is_segment
from dialogue import Message, Dialogue
from util import get_string_no_punctuation_or_emoji,remove_punctuation_and_length
from concurrent.futures import ThreadPoolExecutor, TimeoutError

sys.stdout.reconfigure(encoding='utf-8')  # ✅ 让 print() 正确处理 UTF-8 字符

import numpy as np
import llm
from logger import setup_logging
from settings import load_config

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout)
# Load environment variables
from dotenv import load_dotenv
load_dotenv()
# Constants
LLM_TASK_SERVER_URL = "ws://localhost:8085"
#LLM_TASK_SERVER_URL = os.getenv("LLM_TASK_SERVER_URL")


class LlmWorker:
    """Per-session worker that feeds recognized speech text to an LLM and
    streams the segmented reply back over the shared WebSocket connection.
    """

    def __init__(self, wsapp, session_id, config):
        """
        Args:
            wsapp: websocket.WebSocketApp used to send reply frames.
            session_id: identifier of the remote conversation session.
            config: loaded configuration dict (keys: "selected_module",
                "LLM", "prompt").
        """
        self.wsapp = wsapp
        self.session_id = session_id
        self.config = config
        self.prompt = None
        # Client state
        self.client_abort = False
        self.client_listen_mode = "auto"
        # State read by reply()/reset(); initialize here so reply() can be
        # called before start()/reset() without raising AttributeError
        # (original code only created these attributes inside reset()).
        self.listening = False
        self.content = ''
        # LLM state
        self.llm_finish_task = False
        self.dialogue = Dialogue()

        # Thread/task plumbing
        self.loop = asyncio.get_event_loop()
        self.stop_event = threading.Event()
        self.tts_queue = queue.Queue()
        self.executor = ThreadPoolExecutor(max_workers=5)
        self.scheduled_tasks = deque()
        logging.info(f'LLM agent : {self.config["selected_module"]["LLM"]}')
        self.llm = llm.create_instance(
                self.config["selected_module"]["LLM"],
                self.config["LLM"][self.config["selected_module"]["LLM"]],
            )

        self.init_param()

    def init_param(self):
        # Defer prompt setup to the default executor so __init__ returns fast.
        self.loop.run_in_executor(None, self._initialize_components)

    def _initialize_components(self):
        self.prompt = self.config["prompt"]
        # Give the LLM a notion of the current date/time.
        if "{date_time}" in self.prompt:
            date_time = time.strftime("%Y-%m-%d %H:%M", time.localtime())
            self.prompt = self.prompt.replace("{date_time}", date_time)
        # NOTE(review): the prompt is stored with role="user"; confirm it
        # should not be role="system" for this LLM backend.
        self.dialogue.put(Message(role="user", content=self.prompt))

    def chat(self, query):
        """Run one LLM round-trip for `query` and stream the reply back.

        Returns:
            True on success, None if the LLM call raised.
        """
        self.dialogue.put(Message(role="user", content=query))
        response_message = []
        start = 0
        try:
            llm_responses = self.llm.response(self, self.dialogue.get_llm_dialogue())
        except Exception as e:
            # Fixed: was `self.logger.error(...)` but this class has no
            # `logger` attribute, which raised AttributeError inside except.
            logging.error(f"LLM 处理出错 {query}: {e}")
            return None
        self.llm_finish_task = False
        for content in llm_responses:
            response_message.append(content)
            # Stop consuming tokens if the client aborted mid-generation;
            # everything collected so far is then skipped by `start`.
            if self.client_abort:
                start = len(response_message)
                break

        # Deliver everything generated after `start`, split into sentences.
        if start < len(response_message):
            segment_text = "".join(response_message[start:])
            # Split after Chinese sentence terminators, keeping each
            # terminator with its sentence. Unlike the original
            # `re.findall(r'[^。？！]+[。？！]', ...)`, a trailing fragment
            # without a terminator is kept instead of silently dropped.
            sentences = [s for s in re.split(r'(?<=[。？！])', segment_text) if s]
            if len(sentences) == 1:
                # Fixed: original referenced undefined name `sentence` here
                # (NameError). cnt=-1 appears to mark a single-shot reply
                # -- TODO confirm against the receiving protocol.
                self.reply('start', -1, sentences[0])
            else:
                for i, sentence in enumerate(sentences):
                    print(f"句子 {i}: {sentence}")
                    if i == 0:
                        self.reply('start', i, sentence)       # first segment
                    elif i == len(sentences) - 1:
                        self.reply('end', i, sentence)         # last segment
                    else:
                        self.reply('next', i, sentence)        # middle segments

        # Record the full assistant turn in the dialogue history.
        self.dialogue.put(Message(role="assistant", content="".join(response_message)))
        return True

    def on_asr_content(self, text):
        """Submit recognized speech text to the LLM on the worker pool."""
        logging.info("Add sentece words: " + text)
        self.executor.submit(self.chat, text)

    def reset(self):
        # Clear per-utterance state after each reply frame is sent.
        self.listening = False
        self.content = ''

    def reply(self, state, cnt, content):
        """Send one reply segment to the task server.

        Args:
            state: 'start' | 'next' | 'end' marker for the segment stream.
            cnt: segment index within the reply (-1 for a single-shot reply).
            content: sentence text to deliver; empty content is ignored.
        """
        if content == '':
            logging.info(f'Ignore empty content {self.content}')
            self.reset()
            return

        message = {
            'type': 'llm',
            'timeStamp': int(time.time() * 1000),
            'state' : state,
            'cnt' : cnt,
            'session_id': self.session_id,
            'content': content
        }
        self.wsapp.send(json.dumps(message).encode())
        self.reset()

    def start(self, mode):
        self.listening = True

    def stop(self):
        self.listening = False


class LlmTaskClient:
    """WebSocket client that connects to the LLM task server, creates one
    LlmWorker per session, and routes incoming ASR messages to it.
    """

    def __init__(self):
        self.workers = {}   # session_id -> LlmWorker
        self.wsapp = None
        setup_logging()  # initialize logging before anything else
        self.config = load_config()

    def initialize(self):
        logging.info("task client init")

    def get_worker(self, session_id):
        """Return the worker for `session_id`, creating it on first use."""
        if session_id not in self.workers:
            self.workers[session_id] = LlmWorker(self.wsapp, session_id, self.config)
        return self.workers[session_id]

    def parse_text_message(self, message):
        """Dispatch one JSON text frame received from the task server.

        Raises:
            KeyError/json.JSONDecodeError on malformed frames (caught and
            logged by on_message).
        """
        data = json.loads(message)
        # Fixed: was a bare print(); route received frames through logging.
        logging.debug(f"Rec : {data}")
        session_id = data['session_id']
        if data['type'] == 'asr':
            worker = self.get_worker(session_id)
            state = data['state']
            if state == 'start':
                logging.info(f"Recv message:{data['text']}")
                worker.on_asr_content(data['text'])
            elif state == 'stop':
                worker.stop()
            # Fixed: the original message said "started" for every state,
            # including 'stop'.
            logging.info(f'Worker {session_id} handled state {state}')
        elif data['type'] == 'finish':
            if session_id in self.workers:
                del self.workers[session_id]
                logging.info(f'Worker {session_id} finished')
        else:
            logging.warning(f'Unknown message type: {data["type"]}')

    def on_message(self, wsapp, message):
        """websocket-client callback; only text frames are supported."""
        try:
            if isinstance(message, str):
                self.parse_text_message(message)
            else:
                # Fixed: was a bare print(); binary frames are unsupported.
                logging.warning("Message error not support binary message")
        except Exception as e:
            logging.error(f"An error occurred: {e}", exc_info=True)

    def on_open(self, wsapp):
        logging.info('Connected to the Llm Task Server.')

    def run(self):
        """Connect to the task server and reconnect forever on failure."""
        logging.info('Starting Llm Task Client...')
        self.wsapp = websocket.WebSocketApp(
            LLM_TASK_SERVER_URL,
            on_message=self.on_message,
            on_open=self.on_open
        )

        while True:
            try:
                ret = self.wsapp.run_forever()
                # NOTE(review): run_forever() returns a bool in current
                # websocket-client releases, so `ret is None` may never be
                # true and this loop reconnects forever — confirm intent.
                if ret is None:
                    break
            except Exception as e:
                logging.error(f"An error occurred: {e}", exc_info=True)
            # Drop all workers: sessions must be re-established after a
            # reconnect since the old wsapp reference is stale.
            self.workers = {}
            logging.info('Reconnecting to the Llm Task Server in 3 seconds...')
            time.sleep(3)


if __name__ == "__main__":
    # Script entry point: build the client, log startup, run reconnect loop.
    client = LlmTaskClient()
    client.initialize()
    client.run()

