# -*- coding: utf-8 -*-
"""
实时流式识别 (从 MP3 文件进行转换) 并在处理完每个音频文件后将最终结果输入 aidemo, 依次处理 aud 文件夹下的所有音频文件
需要安装 websocket-client, librosa 和 soundfile 库
"""
import websocket
import time
import uuid
import json
import logging
import sys
import os
from openai import OpenAI
import re
from source import const
from source import audio
import pyaudio  # 导入 pyaudio 库
import pyttsx3
import threading

p = pyaudio.PyAudio()  # shared PyAudio instance; terminated in the __main__ finally block
engine = pyttsx3.init()  # initialize the text-to-speech engine

current_dir = os.path.dirname(os.path.abspath(__file__))
aiinput = ""  # latest speech-recognition result; reset after each round
PROMPT_DIR = os.path.join(current_dir, "prompt")
# Screen coordinates (x, y) of each plant card on the seed-selection page.
# NOTE(review): coordinates look window-relative — confirm against the actual
# game window size/position before trusting the clicks.
position_map = {
    1: (43, 158),  # Sunflower
    2: (101, 158),  # Peashooter
    3: (152, 151),  # Snow Pea
    4: (207, 164),  # Wall-nut
    5: (257, 156),  # Cherry Bomb
    6: (312, 161),  # Threepeater
    7: (363, 157),  # Repeater
    8: (415, 165),  # Chomper
    9: (45, 234),  # Puff-shroom
    10: (98, 234),  # Potato Mine
    11: (149, 234),  # Pumpkin
    12: (200, 233),  # Spikeweed
    13: (257, 235),  # Jalapeno
    14: (307, 233),  # Scaredy-shroom
    15: (365, 234),  # Sun-shroom
    16: (414, 237),  # Ice-shroom
    17: (45, 311),  # Hypno-shroom
}
# Display names for the plant cards, keyed the same way as position_map.
# Fix: entry 5 previously had a stray leading space (" 樱桃炸弹"), which leaked
# into selected_plants and the LLM prompt.
plant_map = {
    1: "向日葵",
    2: "豌豆射手",
    3: "寒冰豌豆射手",
    4: "坚果墙",
    5: "樱桃炸弹",
    6: "三向豌豆射手",
    7: "双发射手",
    8: "食人花",
    9: "喷射蘑菇",
    10: "土豆地雷",
    11: "南瓜",
    12: "杂草地刺",
    13: "愤怒辣椒",
    14: "胆小菇",
    15: "阳光菇",
    16: "冰冻蘑菇",
    17: "诱惑蘑菇",
}
# Root-logger setup: INFO level, plain "LEVEL: message" lines on stdout.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)

# OpenAI-compatible chat client (chatanywhere proxy endpoint).
# NOTE(review): "yourkey" is a placeholder — supply a real API key before running.
client = OpenAI(
    api_key="yourkey",
    base_url="https://api.chatanywhere.tech/v1"
)

# Global state shared across recognition rounds.
# current_page drives which prompt file analyze_and_respond loads
# (1/2/3 presumably map to game screens — confirm against prompt/*.txt).
current_page = 1
selected_plants = {}  # pick order (1-based int) -> chosen plant name

def gpt_4o_api(messages: list):
    """Send *messages* to the chat-completion endpoint and return the reply text."""
    return client.chat.completions.create(
        model="deepseek-v3", messages=messages
    ).choices[0].message.content

def analyze_and_respond(user_input: str):
    """Analyze the recognized speech and produce an aidemo click instruction.

    Loads the prompt template for the current page, asks the LLM for a
    ``num:...;voice:...`` reply, speaks the voice part aloud, and maps the
    numeric command to a screen coordinate. Updates the module globals
    ``current_page`` and ``selected_plants`` as a side effect.

    Args:
        user_input: text produced by speech recognition.

    Returns:
        ``((x, y), [flag_a, flag_b])`` — ``(0, 0)`` means "nothing to click".

    Raises:
        ValueError: when the LLM reply cannot be parsed, or when a board
            coordinate digit is out of range.
    """
    global current_page, selected_plants
    prompt_file_name = f"{current_page}.txt"
    prompt_file_path = os.path.join(PROMPT_DIR, prompt_file_name)

    # Load the per-page prompt template (UTF-8).
    try:
        with open(prompt_file_path, "r", encoding="utf-8") as f:
            prompt_template = f.read()
    except FileNotFoundError:
        logger.error(f"Prompt文件 '{prompt_file_path}' 未找到。")
        return ((0, 0), [0, 0])
    except Exception as e:
        logger.error(f"读取Prompt文件时出错: {e}")
        return ((0, 0), [0, 0])

    # Fill the template with the current selection state and the user's words.
    prompt = prompt_template.format(
        selected_plants=selected_plants,
        user_input=user_input
    )

    messages = [{'role': 'user', 'content': prompt}, ]
    response = gpt_4o_api(messages)
    print(f"AI Output: {response}")
    print(f"current_page: {current_page}")
    # Strip whitespace/newlines so the "num;voice" split is reliable.
    response = response.strip().replace('\n', '').replace('\r', '')

    parts = response.split(';')
    if len(parts) != 2:
        raise ValueError("格式错误，需要包含num和voice两部分")

    # Parse the "num" half: take the first run of digits.
    num_part = parts[0].strip()
    print(f"num_part{num_part}")
    digits = re.findall(r'\d+', num_part)
    if not digits:
        # Fix: previously indexed [0] unconditionally -> bare IndexError.
        raise ValueError("num部分未包含数字")
    num = int(digits[0])
    print(f"num{num}")

    # Parse the "voice" half ("voice:<text>" — skip the 6-char prefix) and speak it.
    voice_part = parts[1].strip()
    voice = voice_part[6:]
    engine.say(voice)
    engine.runAndWait()  # blocks until speech finishes; required by pyttsx3
    print(f"voice{voice}")

    if current_page == 1:
        # Page 1: only command 1 ("start") is meaningful; it advances to page 2.
        if num == 1:
            current_page = 2
            pos = (569, 132)
            print(pos)
            return (pos, [1, 0])
        else:
            print("None")
            return ((0, 0), [1, 0])

    elif current_page == 2:
        # Page 2: seed selection. 1-17 pick a plant card; 18 confirms.
        if num in position_map:
            pos = position_map[num]
            plant_name = plant_map[num]
            if plant_name not in selected_plants.values():
                # Keys are assigned sequentially, so the next key is count + 1.
                new_key = max(selected_plants.keys()) + 1 if selected_plants else 1
                selected_plants[new_key] = plant_name
                print(f"添加植物：{plant_name}, 位置：{pos}, key: {new_key}")  # 方便调试
            else:
                print(f"植物 {plant_name} 已经存在，不再添加。")
            print(selected_plants)
            print(pos)
            return (pos, [1, 0])
        elif num == 18:
            pos = (244, 563)
            # Fix: max() on an empty dict raised ValueError; len() is equivalent
            # (keys are sequential from 1) and safe when nothing is selected.
            if len(selected_plants) >= 8:
                current_page = 3
            print(pos)
            return (pos, [1, 0])
        else:
            print("None")
            return ((0, 0), [1, 0])

    elif current_page == 3:
        # Page 3: in-game. 1-8 pick a seed slot, 9 is the menu/pause button,
        # two-digit numbers encode a board cell (tens = row, ones = column).
        if 1 <= num <= 8:
            pos = (55 * num + 55, 40)
            print(pos)
            return (pos, [1, 0])
        elif num == 9:
            pos = (36, 30)
            print(pos)
            return (pos, [1, 0])
        elif 10 <= num <= 100:
            tens_digit = num // 10  # row index
            ones_digit = num % 10  # column index

            # Pixel y for each row (1-5).
            row_map = {
                1: 120, 2: 220, 3: 320, 4: 420, 5: 520
            }
            # Pixel x for each column (1-9).
            hol_map = {
                1: 80, 2: 160, 3: 240, 4: 320, 5: 400,
                6: 480, 7: 560, 8: 640, 9: 720
            }

            if tens_digit not in row_map:
                raise ValueError(f"十位数 {tens_digit} 超出范围(1-5)")
            if ones_digit not in hol_map:
                raise ValueError(f"个位数 {ones_digit} 超出范围(1-9)")

            pos = (hol_map[ones_digit], row_map[tens_digit])
            print(pos)
            return (pos, [1, 0])
        else:
            print("None")
            return ((0, 0), [0, 0])

    # Fix: previously fell through and returned None for any other page value.
    return ((0, 0), [0, 0])


def send_start_params(ws):
    """Send the START control frame (credentials + audio format) as JSON text."""
    start_frame = {
        "type": "START",
        "data": {
            "appid": const.APPID,
            "appkey": const.APPKEY,
            "dev_pid": const.DEV_PID,
            "cuid": "cuid-1",
            "sample": 16000,
            "format": "pcm",
        },
    }
    ws.send(json.dumps(start_frame), websocket.ABNF.OPCODE_TEXT)


def send_audio(ws, pcm_data):
    """Stream *pcm_data* to the server in 160 ms binary frames, paced in real time."""
    chunk_ms = 160  # milliseconds of audio per frame
    sample_rate = 16000  # 16 kHz, 16-bit mono => 2 bytes per sample
    chunk_len = int(sample_rate * 2 / 1000 * chunk_ms)

    for offset in range(0, len(pcm_data), chunk_len):
        frame = pcm_data[offset:offset + chunk_len]
        try:
            ws.send(frame, websocket.ABNF.OPCODE_BINARY)
        except Exception as e:
            logger.error(f"发送音频数据时出错: {e}")
            return
        # Sleep one frame's duration so audio arrives at playback speed.
        time.sleep(chunk_ms / 1000.0)


def send_finish(ws):
    """Send the FINISH control frame, ending the recognition session."""
    ws.send(json.dumps({"type": "FINISH"}), websocket.ABNF.OPCODE_TEXT)


def send_cancel(ws):
    """Send the CANCEL control frame, aborting the recognition session."""
    ws.send(json.dumps({"type": "CANCEL"}), websocket.ABNF.OPCODE_TEXT)


def on_open(ws, pcm_data):
    """Once the connection opens, stream the session frames on a worker thread."""

    def _stream(*_args):
        # START -> audio chunks -> FINISH; failures are logged, never raised.
        try:
            send_start_params(ws)
            send_audio(ws, pcm_data)
            send_finish(ws)
        except Exception as e:
            logger.error(f"音频流线程中发生错误: {e}")
        finally:
            logger.debug("线程终止")

    threading.Thread(target=_stream).start()


def on_message(ws, message):
    """Receive a server message; stash any recognition result in global ``aiinput``."""
    global aiinput
    try:
        payload = json.loads(message)
        if payload.get("result"):
            # Hand the recognized text to aidemo via the module-level global.
            aiinput = payload["result"]
        elif "err_msg" in payload:
            logger.error(payload["err_msg"])
    except json.JSONDecodeError:
        logger.error(f"无法解析 JSON 响应: {message}")
    except Exception as e:
        logger.error(f"处理消息时发生错误: {e}")


def on_error(ws, error):
    """Log errors raised by the websocket library (e.g. connection timeouts)."""
    message = "错误: " + str(error)
    logger.error(message)


def on_close(ws):
    """WebSocket closed; nothing to clean up."""
    pass
def audtotxt():
    """Record one utterance from the microphone and return the recognized text.

    Captures PCM audio, streams it to the ASR service over a websocket, and
    returns the transcript accumulated by ``on_message`` into the module
    global ``aiinput`` (empty string if nothing was recognized).

    Returns:
        str: the recognized text, or "" on failure.
    """
    global aiinput
    logging.basicConfig(format='%(levelname)s: %(message)s')
    logger.setLevel(logging.INFO)

    try:
        # 1. Capture PCM data from the microphone.
        pcm_data = audio.record_audio()
        if pcm_data is None:
            logger.error("录音失败，请检查麦克风设置。")
            # Fix: previously fell through and streamed None to the server,
            # crashing in send_audio (len(None)).
            return aiinput

        # 2. Connect the WebSocket and stream the audio.
        uri = const.URI + "?sn=" + str(uuid.uuid1())
        logger.info(f"正在连接到: {uri}")

        ws_app = websocket.WebSocketApp(
            uri,
            on_open=lambda ws: on_open(ws, pcm_data),
            on_message=lambda ws, msg: on_message(ws, msg),
            on_error=on_error,
            on_close=on_close
        )

        ws_app.run_forever()

        # 3. Log the recognition result collected by on_message.
        logger.info(f"语音识别结果： '{aiinput}' 。")
    except KeyboardInterrupt:
        logger.info("程序被用户中断。")
    except Exception as e:
        logger.error(f"发生错误: {e}")
    return aiinput

if __name__ == "__main__":
    logging.basicConfig(format='%(levelname)s: %(message)s')
    logger.setLevel(logging.INFO)

    try:
        while True:
            # 1. 使用 record_audio 实时获取 PCM 数据
            logger.info("请按下空格键开始录音...")
            pcm_data = audio.record_audio()
            print("get_pcm")
            if pcm_data is None:
                logger.error("录音失败，请检查麦克风设置。")
                continue # 跳过本次循环

            # 2. 连接 WebSocket 并发送数据

            uri = const.URI + "?sn=" + str(uuid.uuid1())
            logger.info(f"正在连接到: {uri}")

            ws_app = websocket.WebSocketApp(
                uri,
                on_open=lambda ws: on_open(ws, pcm_data),
                on_message=lambda ws, msg: on_message(ws, msg),
                on_error=on_error,
                on_close=on_close
            )

            ws_app.run_forever()

            # 3. 处理语音识别结果
            logger.info(f"语音识别结果： '{aiinput}' 。")
            analyze_and_respond(aiinput)  # 将结果传递给 aidemo
            logger.info("aidemo 处理完成。")

            # 4. 重置全局变量
            aiinput = ""  # 重置 aiinput，为下一次录音做准备
    except KeyboardInterrupt:
        logger.info("程序被用户中断。")
    except Exception as e:
        logger.error(f"发生错误: {e}")
    finally:
        p.terminate() # 确保 PyAudio 资源被正确释放
        logger.info("程序已退出。")