from flask import Flask, render_template, jsonify, request
from flask_socketio import SocketIO
from flask_cors import CORS
import threading
import time
import os
import webrtcvad
import pyaudio
import numpy as np
import wave
import pygame
import asyncio
import edge_tts
import langid
from transformers import AutoModelForCausalLM, AutoTokenizer
from funasr import AutoModel
import requests
import json

# --- Flask / Socket.IO application setup --------------------------------
app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")  # "*" is demo-friendly; restrict origins in production
CORS(app)

# --- Audio capture parameters -------------------------------------------
AUDIO_RATE = 16000         # sample rate (Hz) expected by webrtcvad and SenseVoice
AUDIO_CHANNELS = 1         # mono capture
CHUNK = 1024               # frames per PyAudio read
VAD_MODE = 1               # webrtcvad aggressiveness: 0 (least) .. 3 (most)
OUTPUT_DIR = "./output"    # detected speech segments are written here as .wav
NO_SPEECH_THRESHOLD = 2.0  # seconds of silence that closes a speech segment
folder_path = "./Test_QWen2_VL/"  # TTS reply .mp3 files are written here
audio_file_count = 0       # monotonically increasing suffix for saved audio/mp3 files

# --- Module-level recording state ---------------------------------------
recording_active = False   # NOTE(review): unused at module level; AudioProcessor keeps its own flag
audio_thread = None        # background recorder thread, created by AudioProcessor
start_event = threading.Event()  # gates the recorder thread until /start_recording is hit
vad = webrtcvad.Vad()
vad.set_mode(VAD_MODE)
flag_first_time = True     # first /start_recording plays a canned greeting (inference_1)
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(folder_path, exist_ok=True)

# Local SenseVoice ASR model loaded via FunASR at import time.
model_dir = r"F:\SenseVoiceSmall"
model_senceVoice = AutoModel(model=model_dir, trust_remote_code=True)

# --- Dify chat workflow API ---------------------------------------------
WORKFLOW_URL = 'http://localhost/v1/chat-messages'
# SECURITY NOTE(review): hard-coded API key committed to source; move it to
# an environment variable or secrets store.
API_KEY = 'app-uKA8h69lZxIjoIDELJc0SNL4'
HEADERS = {
    'Authorization': f'Bearer {API_KEY}',
    'Content-Type': 'application/json'
}
conversation_id = None  # Dify conversation id, threaded through run_workflow calls

def run_workflow(user_input, conversation_id=None, timeout=30):
    """Send *user_input* to the Dify chat-messages API in blocking mode.

    Args:
        user_input: transcribed user question to submit as the query.
        conversation_id: prior conversation id to continue, or None to start
            a new conversation.
        timeout: seconds to wait for the HTTP response (new parameter,
            backward-compatible default).

    Returns:
        (answer, conversation_id): the model's answer text and the (possibly
        new) conversation id on success; (None, original conversation_id) on
        any request failure.
    """
    print(f'用户的问题是 {user_input}')
    payload = {
        "inputs": {},
        "query": user_input,
        "response_mode": "blocking",
        "conversation_id": conversation_id,
        "user": "abc-123",
        # NOTE(review): a fixed demo image is attached to every query;
        # confirm the workflow actually requires it.
        "files": [
            {
                "type": "image",
                "transfer_method": "remote_url",
                "url": "https://cloud.dify.ai/logo/logo-site.png"
            }
        ]
    }

    try:
        # BUG FIX: the original call had no timeout, so a stalled server
        # would hang the caller — and with it the whole audio pipeline —
        # forever. A timeout raises requests.exceptions.Timeout, which is a
        # RequestException and is handled below like any other failure.
        response = requests.post(WORKFLOW_URL, headers=HEADERS, json=payload, timeout=timeout)
        response.raise_for_status()
        response_data = response.json()
        answer = response_data.get("answer", "")
        new_conversation_id = response_data.get("conversation_id", "")
        return answer, new_conversation_id
    except requests.exceptions.RequestException as e:
        print(f"请求发生错误: {e}")
        return None, conversation_id

class AudioProcessor:
    """Microphone capture + VAD segmentation + ASR/LLM/TTS pipeline.

    A background thread (audio_recorder) blocks on the module-level
    start_event, then reads PCM from PyAudio, runs webrtcvad over ~0.5 s
    windows, and once NO_SPEECH_THRESHOLD seconds pass without speech it
    writes the buffered segment to a .wav file and runs inference() on it.

    NOTE(review): state is split between this instance and module globals
    (audio_thread, audio_file_count, conversation_id, vad, start_event);
    only a single instance is expected to exist.
    """

    def __init__(self):
        self.recording_active = False        # recorder main loop runs while this is True
        self.last_active_time = time.time()  # last time VAD detected speech
        self.segments_to_save = []           # pending (raw_pcm_bytes, timestamp) chunks
        self.saved_intervals = []            # (start_ts, end_ts) of segments already flushed
        self.last_vad_end_time = 0           # NOTE(review): never updated after init
        self.audio_queue = []                # NOTE(review): unused anywhere in this class
        self.audio_file_count = 0            # NOTE(review): unused; the module global is used instead
        self.stop_playback_flag = False      # set True to interrupt the playback thread
        self.play_thread = None              # thread currently playing TTS output, if any
        # Spawn the recorder immediately; it parks on start_event until
        # /start_recording is called.
        self.start_audio_thread()

    def start_audio_thread(self):
        """Start the recorder thread unless one is already alive."""
        global audio_thread
        if not audio_thread or not audio_thread.is_alive():
            audio_thread = threading.Thread(target=self.audio_recorder)
            audio_thread.start()

    def check_vad_activity(self, audio_data):
        """Return True if more than ~40% of the frames in audio_data contain speech.

        audio_data is raw 16-bit mono PCM bytes at AUDIO_RATE.
        """
        # rate = fraction of frames that must be voiced to call the window active.
        num, rate = 0, 0.4
        # NOTE(review): step is AUDIO_RATE*0.02 = 320 *bytes*, i.e. 160 samples
        # = 10 ms of 16-bit audio, although the 0.02 factor suggests 20 ms was
        # intended. webrtcvad accepts 10 ms frames, so this still works — confirm.
        step = int(AUDIO_RATE * 0.02)
        flag_rate = round(rate * len(audio_data) // step)
        for i in range(0, len(audio_data), step):
            chunk = audio_data[i:i + step]
            if len(chunk) == step and vad.is_speech(chunk, sample_rate=AUDIO_RATE):
                num += 1
        return num > flag_rate

    def audio_recorder(self):
        """Recorder thread main loop: capture PCM, segment by VAD, flush on silence."""
        # Block until the first /start_recording request releases the event.
        start_event.wait()
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=AUDIO_CHANNELS, rate=AUDIO_RATE, input=True, frames_per_buffer=CHUNK)
        audio_buffer = []
        print("音频录制已开始")
        try:
            while self.recording_active:
                data = stream.read(CHUNK)
                audio_buffer.append(data)
                # Evaluate VAD once ~0.5 s of audio has accumulated.
                if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                    raw_audio = b''.join(audio_buffer)
                    vad_result = self.check_vad_activity(raw_audio)
                    if vad_result:
                        self.last_active_time = time.time()
                        self.segments_to_save.append((raw_audio, time.time()))
                        socketio.emit('vad_status', {'status': 'active'})
                    else:
                        socketio.emit('vad_status', {'status': 'inactive'})
                    audio_buffer = []
                # After NO_SPEECH_THRESHOLD seconds of silence, flush the segment.
                if time.time() - self.last_active_time > NO_SPEECH_THRESHOLD:
                    # NOTE(review): last_vad_end_time is never advanced, so this
                    # comparison effectively only guards against an empty list.
                    if self.segments_to_save and self.segments_to_save[-1][1] > self.last_vad_end_time:
                        # NOTE(review): save_audio_video runs ASR+LLM+TTS inline,
                        # blocking capture until the reply starts playing.
                        self.save_audio_video()
                        self.last_active_time = time.time()
        finally:
            print("音频录制线程正在退出...")
            stream.stop_stream()
            stream.close()
            p.terminate()
            print("音频录制线程已退出")

    def save_audio_video(self):
        """Write buffered speech segments to a numbered .wav and run inference on it."""
        global audio_file_count
        audio_file_count += 1
        audio_output_path = f"{OUTPUT_DIR}/audio_{audio_file_count}.wav"
        if not self.segments_to_save:
            return
        # Skip segments whose start overlaps an interval we already saved.
        if self.saved_intervals and self.saved_intervals[-1][1] >= self.segments_to_save[0][1]:
            self.segments_to_save.clear()
            return
        audio_frames = [seg[0] for seg in self.segments_to_save]
        # 16-bit (sampwidth=2) mono PCM at AUDIO_RATE.
        wf = wave.open(audio_output_path, 'wb')
        wf.setnchannels(AUDIO_CHANNELS)
        wf.setsampwidth(2)
        wf.setframerate(AUDIO_RATE)
        wf.writeframes(b''.join(audio_frames))
        wf.close()
        socketio.emit('processing_status', {'status': 'processing'})
        # Blocking call: ASR -> LLM -> TTS -> playback.
        self.inference(audio_output_path)
        self.saved_intervals.append((self.segments_to_save[0][1], self.segments_to_save[-1][1]))
        self.segments_to_save.clear()

    async def amain(self, TEXT, VOICE, OUTPUT_FILE):
        """Synthesize TEXT with edge-tts voice VOICE into OUTPUT_FILE (mp3)."""
        communicate = edge_tts.Communicate(TEXT, VOICE)
        await communicate.save(OUTPUT_FILE)

    def play_audio(self, file_path):
        """Play file_path on a fresh thread, interrupting any playback in progress."""
        if self.play_thread and self.play_thread.is_alive():
            self.stop_playback_flag = True
            self.play_thread.join(timeout=2)
        self.stop_playback_flag = False
        self.play_thread = threading.Thread(target=self._play_audio_internal, args=(file_path,))
        self.play_thread.start()

    def _play_audio_internal(self, file_path):
        """Playback worker: stream file_path via pygame, polling stop_playback_flag."""
        try:
            pygame.mixer.init()
            pygame.mixer.music.load(file_path)
            socketio.emit('playback_status', {'status': 'start'})
            pygame.mixer.music.play()
            while pygame.mixer.music.get_busy():
                if self.stop_playback_flag:
                    pygame.mixer.music.stop()
                    print("播放被打断")
                    socketio.emit('playback_status', {'status': 'end'})
                    break
                time.sleep(0.1)
            # NOTE(review): on the interrupted path this emits 'end' a second time.
            socketio.emit('playback_status', {'status': 'end'})
        except Exception as e:
            print(f"播放失败: {e}")
        finally:
            pygame.mixer.quit()

    def inference(self, TEMP_AUDIO_FILE):
        """Transcribe the saved wav, query the LLM workflow, synthesize and play the answer."""
        input_file = TEMP_AUDIO_FILE
        # NOTE(review): language="zn" — SenseVoice documentation lists
        # "zh"/"en"/"auto"; confirm "zn" is intentional and not a typo.
        res = model_senceVoice.generate(input=input_file, cache={}, language="zn", use_itn=False)
        # SenseVoice prefixes the transcript with <|...|> tags; keep only the tail.
        prompt = res[0]['text'].split(">")[-1]
        print("ASR OUT:", prompt)
        if prompt:
            socketio.emit('message_user', {'status': 'success', 'type': 'user', 'text': prompt})
        if prompt and prompt.strip():
            global conversation_id
            output_text, conversation_id = run_workflow(prompt, conversation_id=conversation_id)
            print("answer", output_text)
        else:
            output_text = "请重新提问"
        
        if output_text:
            socketio.emit('message_llm', {'status': 'success', 'type': 'llm', 'text': output_text})
        # NOTE(review): output_text can be None when run_workflow fails;
        # langid.classify would then raise — consider a fallback message.
        language, _ = langid.classify(output_text)
        # Map detected language to an edge-tts voice; default is the zh voice.
        language_speaker = {"zh": 'zh-CN-YunxiNeural'}
        used_speaker = language_speaker.get(language, "zh-CN-YunxiNeural")
        print(f"检测到语种：{language}，使用音色：{used_speaker}")
        output_file = os.path.join(folder_path, f"sft_{audio_file_count}.mp3")
        asyncio.run(self.amain(output_text, used_speaker, output_file))
        self.play_audio(output_file)

    def inference_1(self, TEMP_AUDIO_FILE):
        """Speak a fixed self-introduction (used on the very first /start_recording)."""
        output_text = "董事长您好      ,我是甘肃电力基于光明电力大模型的技术创新产品小智，您可以问我关于电力安全事故方面的一些问题，我会认真为您解答"

        if output_text:
            socketio.emit('message_llm', {'status': 'success', 'type': 'llm', 'text': output_text})
        language, _ = langid.classify(output_text)
        language_speaker = {"zh": 'zh-CN-YunxiNeural'}
        used_speaker = language_speaker.get(language, "zh-CN-YunxiNeural")
        print(f"检测到语种：{language}，使用音色：{used_speaker}")
        output_file = os.path.join(folder_path, f"sft_{audio_file_count}.mp3")
        asyncio.run(self.amain(output_text, used_speaker, output_file))
        self.play_audio(output_file)

    def force_stop_all(self):
        """Stop recording, drop pending segments, and interrupt any playback."""
        self.recording_active = False
        self.segments_to_save.clear()
        self.stop_playback_flag = True
        if pygame.mixer.get_init() and pygame.mixer.music.get_busy():
            pygame.mixer.music.stop()
            print("已强制停止音频播放")
# Module-level singleton; constructing it also spawns the recorder thread,
# which then parks on start_event until /start_recording is called.
audio_processor = AudioProcessor()

@app.route('/')
def index():
    """Serve the single-page UI."""
    page = render_template('index.html')
    return page

@app.route('/start_recording', methods=['POST'])
def start_recording():
    """Begin audio capture; on the very first call, play a canned greeting first."""
    global audio_processor, flag_first_time

    if flag_first_time:
        # First request ever: speak the scripted self-introduction.
        audio_processor.inference_1("")
        flag_first_time = False
    else:
        # Release the recorder thread parked in start_event.wait().
        start_event.set()

    if audio_processor.recording_active:
        return jsonify({'status': 'already_running'})

    audio_processor.recording_active = True
    start_event.set()
    return jsonify({'status': 'started'})

@app.route('/stop_recording', methods=['POST'])
def stop_recording():
    """Stop the recorder thread and any current playback.

    Returns 'stopped' when an active session was shut down, otherwise
    'not_running'.
    """
    global audio_processor
    # BUG FIX: the previous version called start_event.wait() unconditionally,
    # which hung this request forever if recording had never been started, and
    # the actual stop logic was commented out so the endpoint never stopped
    # anything. Restored the guarded shutdown path.
    if audio_processor.recording_active:
        audio_processor.recording_active = False
        audio_processor.force_stop_all()
        if audio_thread and audio_thread.is_alive():
            # Bounded join: the recorder loop exits on the next stream.read.
            audio_thread.join(timeout=5)
        return jsonify({'status': 'stopped'})
    return jsonify({'status': 'not_running'})

@app.route('/force_stop', methods=['POST'])
def force_stop():
    """Immediately stop playback and drop any buffered audio segments."""
    # BUG FIX: previously blocked on start_event.wait() (hanging the request
    # if recording had never started) while the actual stop call was
    # commented out, so this endpoint did nothing. Now it acts immediately.
    audio_processor.force_stop_all()
    return jsonify({'status': 'forced_stopped'})

if __name__ == '__main__':
    try:
        # debug=True also enables the Werkzeug reloader, which re-imports
        # this module and reloads the ASR model; disable in production.
        socketio.run(app, debug=True, port=5500)
    except KeyboardInterrupt:
        print("正在退出程序...")
        # Let the recorder thread observe recording_active=False and exit
        # cleanly before the process terminates.
        if audio_thread and audio_thread.is_alive():
            audio_processor.recording_active = False
            audio_thread.join()
        print("程序已安全退出")