from flask import Flask, render_template, jsonify, request
from flask_socketio import SocketIO
from flask_cors import CORS  # 新增导入
import threading
import time
import os
import webrtcvad
import pyaudio
import numpy as np
import wave
import pygame
import asyncio
import edge_tts
import langid
from transformers import AutoModelForCausalLM, AutoTokenizer
from funasr import AutoModel

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")
CORS(app)  # enable cross-origin requests for the plain HTTP routes


# Audio capture / VAD configuration.
AUDIO_RATE = 16000        # sample rate (Hz) used for capture, VAD and the saved WAVs
AUDIO_CHANNELS = 1        # mono capture
CHUNK = 1024              # frames per PyAudio read
VAD_MODE = 2              # webrtcvad aggressiveness, 0 (least) .. 3 (most)
OUTPUT_DIR = "./output"   # where recorded speech segments (WAV) are written
NO_SPEECH_THRESHOLD = 2.0  # seconds of silence that ends an utterance
folder_path = "./Test_QWen2_VL/"  # where synthesized TTS mp3 files are written
audio_file_count = 0      # module-level counter used to number output files

recording_active = False
audio_thread = None
start_event = threading.Event()  # gates the recorder thread until /start_recording
vad = webrtcvad.Vad()
vad.set_mode(VAD_MODE)

os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(folder_path, exist_ok=True)

# Local SenseVoice ASR model, loaded once at startup.
model_dir = r"F:\SenseVoiceSmall"
model_senceVoice = AutoModel(model=model_dir, trust_remote_code=True)

# Optional local Qwen LLM (disabled; the Dify workflow below is used instead).
# model_name = r"F:\Qwen2.5-1.5B-Instruct"
# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
# tokenizer = AutoTokenizer.from_pretrained(model_name)

import requests
import json

# Dify chat-messages API configuration.
WORKFLOW_URL = 'http://localhost/v1/chat-messages'
API_KEY = 'app-uKA8h69lZxIjoIDELJc0SNL4'  # NOTE(review): secret committed in source — move to env/config
HEADERS = {
    'Authorization': f'Bearer {API_KEY}',
    'Content-Type': 'application/json'
}
conversation_id = None  # None until the first reply supplies a server-side conversation id
def run_workflow(user_input, conversation_id=None):
    """Send a user query to the Dify chat-messages API (blocking mode).

    Args:
        user_input: the transcribed user question.
        conversation_id: Dify conversation to continue; None starts a new one.

    Returns:
        (answer, conversation_id): ``answer`` is the reply text, or None on
        request failure / empty reply; ``conversation_id`` is the id to pass
        on the next call (unchanged on failure).
    """
    print(f'用户的问题是 {user_input}')

    data = {
        "inputs": {},
        "query": user_input,
        "response_mode": "blocking",
        # NOTE(review): the API may expect "" (not null) for a new
        # conversation — confirm against the Dify docs.
        "conversation_id": conversation_id,
        "user": "abc-123",
        "files": [
            {
                "type": "image",
                "transfer_method": "remote_url",
                "url": "https://cloud.dify.ai/logo/logo-site.png"
            }
        ]
    }

    try:
        response = requests.post(WORKFLOW_URL, headers=HEADERS, json=data)
        # raise_for_status() raises HTTPError for any 4xx/5xx, so everything
        # past this line runs only on success — the original's separate
        # status-code branch was unreachable.
        response.raise_for_status()

        response_data = response.json()
        answer = response_data.get("answer", "")
        new_conversation_id = response_data.get("conversation_id", "")

        if answer:
            return answer, new_conversation_id
        # Successful request but empty answer: tell the operator and return
        # the (possibly new) conversation id so the session continues.
        print("未获取到有效的回答内容。")
        return None, new_conversation_id
    except requests.exceptions.RequestException as e:
        print(f"请求发生错误: {e}")
        if isinstance(e, requests.exceptions.HTTPError):
            print(f"错误信息: {e.response.text}")
        return None, conversation_id
    
class AudioProcessor:
    """Microphone capture → VAD segmentation → ASR → LLM → TTS playback.

    ``__init__`` spawns a background recorder thread (gated on the
    module-level ``start_event``).  The thread buffers roughly 0.5 s of PCM
    at a time, uses webrtcvad to keep only buffers that contain speech, and
    after ``NO_SPEECH_THRESHOLD`` seconds of silence flushes the collected
    speech to a WAV file and runs the full pipeline (SenseVoice ASR →
    Dify workflow → edge-tts → pygame playback).  Progress is pushed to the
    browser via Socket.IO events.
    """

    def __init__(self):
        global audio_thread
        self.recording_active = False        # recorder loop runs while this is True
        self.last_active_time = time.time()  # time of the most recent speech buffer
        self.segments_to_save = []           # pending (raw_pcm_bytes, timestamp) pairs
        self.saved_intervals = []            # (start, end) spans already flushed to disk
        self.last_vad_end_time = 0           # guard against re-saving already-flushed audio
        self.audio_queue = []                # NOTE(review): never used — candidate for removal
        self.audio_file_count = 0            # NOTE(review): unused; the module-level counter is the live one
        self.stop_playback_flag = False      # set True to interrupt TTS playback
        self.play_thread = None              # current playback thread, if any
        # Start the recorder thread immediately; it blocks on start_event
        # inside audio_recorder() until /start_recording releases it.
        audio_thread = threading.Thread(target=self.audio_recorder)
        audio_thread.start()

    def check_vad_activity(self, audio_data):
        """Return True when more than 40% of frames in audio_data are speech.

        ``audio_data`` is raw 16-bit mono PCM at AUDIO_RATE; it is sliced
        into 320-byte chunks (160 samples = 10 ms at 16 kHz), a frame size
        webrtcvad accepts.
        """
        num, rate = 0, 0.4
        step = int(AUDIO_RATE * 0.02)
        flag_rate = round(rate * len(audio_data) // step)

        for i in range(0, len(audio_data), step):
            chunk = audio_data[i:i + step]
            if len(chunk) == step:
                if vad.is_speech(chunk, sample_rate=AUDIO_RATE):
                    num += 1

        return num > flag_rate

    def audio_recorder(self):
        """Background thread: capture microphone audio and queue speech segments."""
        start_event.wait()  # block until /start_recording releases the gate
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=AUDIO_CHANNELS, rate=AUDIO_RATE, input=True, frames_per_buffer=CHUNK)
        audio_buffer = []
        print("音频录制已开始")

        try:
            while self.recording_active:
                data = stream.read(CHUNK)
                audio_buffer.append(data)

                # Run VAD roughly every 0.5 s of accumulated audio.
                if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                    raw_audio = b''.join(audio_buffer)
                    if self.check_vad_activity(raw_audio):
                        self.last_active_time = time.time()
                        self.segments_to_save.append((raw_audio, time.time()))
                        socketio.emit('vad_status', {'status': 'active'})
                    else:
                        socketio.emit('vad_status', {'status': 'inactive'})
                    audio_buffer = []

                # Sustained silence ends the utterance: flush and process it.
                if time.time() - self.last_active_time > NO_SPEECH_THRESHOLD:
                    if self.segments_to_save and self.segments_to_save[-1][1] > self.last_vad_end_time:
                        self.save_audio_video()
                        self.last_active_time = time.time()
        finally:
            # Always release the audio device, even if stream.read() raised
            # (the original leaked the stream/device on an exception).
            stream.stop_stream()
            stream.close()
            p.terminate()

    def save_audio_video(self):
        """Write pending speech segments to a WAV file and run inference on it."""
        if not self.segments_to_save:
            return

        pygame.mixer.init()
        global audio_file_count
        # Bump the counter only when we actually save (the original also
        # incremented on the empty early-return, leaving gaps in numbering).
        audio_file_count += 1
        audio_output_path = f"{OUTPUT_DIR}/audio_{audio_file_count}.wav"

        # A new utterance arrived while TTS was still playing: interrupt it.
        if pygame.mixer.get_init() and pygame.mixer.music.get_busy():
            self.stop_playback_flag = True
            print("检测到新音频，触发播放中断")

        start_time = self.segments_to_save[0][1]
        end_time = self.segments_to_save[-1][1]

        # Skip segments overlapping an interval that was already saved.
        if self.saved_intervals and self.saved_intervals[-1][1] >= start_time:
            self.segments_to_save.clear()
            return

        audio_frames = [seg[0] for seg in self.segments_to_save]

        # Context manager guarantees the WAV header is finalized and the
        # handle closed even if a write fails.
        with wave.open(audio_output_path, 'wb') as wf:
            wf.setnchannels(AUDIO_CHANNELS)
            wf.setsampwidth(2)  # 16-bit samples
            wf.setframerate(AUDIO_RATE)
            wf.writeframes(b''.join(audio_frames))

        socketio.emit('processing_status', {'status': 'processing'})
        self.inference(audio_output_path)

        self.saved_intervals.append((start_time, end_time))
        self.segments_to_save.clear()

    async def amain(self, TEXT, VOICE, OUTPUT_FILE):
        """Synthesize TEXT with the edge-tts voice VOICE into OUTPUT_FILE."""
        communicate = edge_tts.Communicate(TEXT, VOICE)
        await communicate.save(OUTPUT_FILE)

    def play_audio(self, file_path):
        """Play file_path on a fresh thread, interrupting any previous playback."""
        if self.play_thread and self.play_thread.is_alive():
            print("尝试终止上一条播放线程")
            self.stop_playback_flag = True
            self.play_thread.join(timeout=2)  # bounded wait so the caller never hangs

        self.stop_playback_flag = False
        self.play_thread = threading.Thread(target=self._play_audio_internal, args=(file_path,))
        self.play_thread.start()

    def _play_audio_internal(self, file_path):
        """Worker for play_audio: play the file, polling for interruption."""
        try:
            pygame.mixer.init()
            pygame.mixer.music.load(file_path)
            socketio.emit('playback_status', {'status': 'start'})
            pygame.mixer.music.play()

            while pygame.mixer.music.get_busy():
                if self.stop_playback_flag:
                    pygame.mixer.music.stop()
                    print("播放被打断")
                    break
                time.sleep(0.1)

            # Single 'end' notification for both normal completion and
            # interruption (the original emitted it twice when interrupted).
            socketio.emit('playback_status', {'status': 'end'})
        except Exception as e:
            print(f"播放失败: {e}")
        finally:
            pygame.mixer.quit()

    def inference(self, TEMP_AUDIO_FILE):
        """Transcribe the WAV, query the LLM workflow, then synthesize and
        play the answer."""
        res = model_senceVoice.generate(input=TEMP_AUDIO_FILE, cache={}, language="auto", use_itn=False)
        # SenseVoice prefixes the transcript with tags such as <|zh|>;
        # keep only the text after the last '>'.
        prompt = res[0]['text'].split(">")[-1]
        print("ASR OUT:", prompt)

        # Push the recognized question to the front end.
        if prompt:
            socketio.emit('message_user', {'status': 'success', 'type': 'user', 'text': prompt})

        global conversation_id
        output_text = None
        if prompt is not None and prompt.strip():
            output_text, conversation_id = run_workflow(prompt, conversation_id=conversation_id)
            print("answer", output_text)
        if not output_text:
            # Covers both an empty ASR result and a failed/empty workflow
            # reply — the original crashed in langid/edge-tts when
            # run_workflow returned None on a request error.
            output_text = "请重新提问"

        # Push the answer to the front end.
        socketio.emit('message_llm', {'status': 'success', 'type': 'llm', 'text': output_text})

        language, _ = langid.classify(output_text)

        # Map detected language to an edge-tts voice; default to Chinese.
        language_speaker = {
            "zh": "zh-CN-KangkangNeural"
        }
        used_speaker = language_speaker.get(language, "zh-CN-KangkangNeural")
        print(f"检测到语种：{language}，使用音色：{used_speaker}")

        output_file = os.path.join(folder_path, f"sft_{audio_file_count}.mp3")
        asyncio.run(self.amain(output_text, used_speaker, output_file))
        self.play_audio(output_file)

    def force_stop_all(self):
        """Best-effort immediate stop of recording and playback."""
        self.recording_active = False
        self.segments_to_save.clear()
        self.stop_playback_flag = True
        try:
            if pygame.mixer.get_init() and pygame.mixer.music.get_busy():
                pygame.mixer.music.stop()
                print("已强制停止音频播放")
        except Exception:
            # Best-effort: pygame may not be initialized or already shut down.
            pass
# Instantiated at import time: this spawns the recorder thread, which then
# blocks on start_event until /start_recording is called.
audio_processor = AudioProcessor()

@app.route('/')
def index():
    """Serve the single-page web UI."""
    return render_template('index.html')

@app.route('/start_recording', methods=['POST'])
def start_recording():
    """Begin audio capture by releasing the recorder thread's start gate."""
    global audio_thread, audio_processor

    if audio_processor.recording_active:
        return jsonify({'status': 'already_running'})

    audio_processor.recording_active = True
    start_event.set()  # wake the recorder thread waiting in audio_recorder()
    return jsonify({'status': 'started'})

@app.route('/stop_recording', methods=['POST'])
def stop_recording():
    """Stop capture, halt any playback, and wait for the recorder thread."""
    global audio_processor, audio_thread

    if not audio_processor.recording_active:
        return jsonify({'status': 'not_running'})

    # Flag the recorder loop to exit, then force-stop any ongoing playback.
    audio_processor.recording_active = False
    audio_processor.force_stop_all()

    # Wait for the recorder thread to finish releasing the audio device.
    recorder = audio_thread
    if recorder and recorder.is_alive():
        recorder.join()

    return jsonify({'status': 'stopped'})


@app.route('/force_stop', methods=['POST'])
def force_stop():
    """Immediately abort recording and playback without waiting for threads."""
    audio_processor.force_stop_all()
    return jsonify({'status': 'forced_stopped'})


if __name__ == '__main__':
    # Development server with Socket.IO support; debug=True enables the
    # reloader (note: the reloader imports the module twice, starting two
    # recorder threads — disable debug in production).
    socketio.run(app, debug=True, port=5500)
