import socket
import pyaudio
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from load_model import model
from audio_change import convert_to_wav
import Levenshtein
import wave
import os
import threading

# Network parameters
HOST = '0.0.0.0'  # listen on all network interfaces
PORT = 5000       # listening port
BUFFER_SIZE = 4096  # max bytes pulled off the socket per recv

# Audio parameters (16-bit mono PCM; RATE assumed to match the sender — TODO confirm)
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
OUTPUT_FILE = "received_audio.wav"

# Create the TCP server socket; backlog of one pending connection.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((HOST, PORT))
server_socket.listen(1)

# Initialize pyaudio (used only for get_sample_size when writing the wav).
# `frames` accumulates the raw byte chunks received from the client.
p = pyaudio.PyAudio()
frames = []

def handle_client(client_socket):
    """Receive raw PCM audio from *client_socket*, plot it live, then
    transcribe it and send a pronunciation score back to the client.

    Flow: a matplotlib animation callback reads chunks off the socket
    until the user closes the plot window; the collected bytes are
    written to OUTPUT_FILE, converted to wav, transcribed, scored
    against the module-level ``reference_text``, and the score is sent
    back as a string before the connection is closed.

    :param client_socket: connected TCP socket delivering 16-bit PCM
        (presumably mono 16 kHz to match RATE — confirm against sender)
    """
    global frames
    # Reset the shared buffer so audio left over from a previous client
    # is not prepended to this client's recording.
    frames = []
    print("客户端已连接")

    # Live waveform figure showing one CHUNK worth of int16 samples.
    fig, ax = plt.subplots()
    x = np.arange(0, CHUNK)
    line, = ax.plot(x, np.random.rand(CHUNK))
    ax.set_ylim(-32768, 32767)  # full int16 range
    ax.set_xlim(0, CHUNK)

    def update(frame):
        """Animation callback: pull one buffer off the socket, redraw."""
        data = client_socket.recv(BUFFER_SIZE)
        if not data:
            # Peer closed its end; leave the current trace unchanged.
            return line,
        audio_data = np.frombuffer(data, dtype=np.int16)
        # NOTE(review): set_ydata assumes len(audio_data) == CHUNK, but
        # recv may return fewer (or up to BUFFER_SIZE/2) samples — confirm.
        line.set_ydata(audio_data)
        frames.append(data)
        return line,

    # interval=0: poll the socket as fast as matplotlib allows.  Keep a
    # reference so the animation object is not garbage-collected.
    ani = FuncAnimation(fig, update, interval=0)
    plt.show()  # blocks until the user closes the window

    # Persist the received audio; the context manager guarantees the
    # file is closed even if a write step raises.
    with wave.open(OUTPUT_FILE, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))

    # Normalize to the wav format expected by the recognizer.
    converted_audio = convert_to_wav(OUTPUT_FILE, "converted_audio.wav")

    # Speech to text.
    recognized_text = speech_to_text(converted_audio)
    print("识别文本:", recognized_text)

    # Score pronunciation against the reference transcript.
    score = calculate_pronunciation_score(recognized_text, reference_text)
    print("读音分数:", score)

    # Report the score back, closing the socket even if sendall fails.
    try:
        client_socket.sendall(str(score).encode())
    finally:
        client_socket.close()

# 语音转文本函数
def speech_to_text(audio_file):
    w = wave.open(audio_file, 'rb')
    assert w.getnchannels() == 1, "Audio must be mono"
    assert w.getframerate() == 16000, "Audio must be 16kHz"
    buffer = w.readframes(w.getnframes())
    data = np.frombuffer(buffer, dtype=np.int16)
    text = model.stt(data)
    return text

# 计算读音分数函数
def calculate_pronunciation_score(recognized_text, reference_text):
    distance = Levenshtein.distance(recognized_text, reference_text)
    max_length = max(len(recognized_text), len(reference_text))
    score = (max_length - distance) / max_length * 100
    return score

# Reference transcript the client's speech is scored against.
reference_text = "这里应该是标准文本"

# Start the server: accept a single client, handle it, then release
# resources (the original leaked the listening socket and the PyAudio
# instance on exit or on any error inside handle_client).
print(f"服务器启动，监听 {HOST}:{PORT}")
try:
    client_sock, addr = server_socket.accept()
    handle_client(client_sock)
finally:
    server_socket.close()
    p.terminate()