import io
import os
import random
import shutil

import librosa
import numpy
import numpy as np  # np.save/np.load are used below; don't rely on star imports for `np`
import torch
from flask import Flask, request, jsonify
from flask_cors import CORS
from pydub import AudioSegment
from werkzeug.utils import secure_filename

from db import get_db_connection
from utils import generate_token, decode_token
from lib.trainer_end_to_end import *
from lib.feature_extractor_app import *

app = Flask(__name__)
CORS(app)

# Directory for transient uploads and for per-user saved voiceprint vectors.
UPLOAD_FOLDER = 'temp_audio'
PROCESSED_FOLDER = 'voiceprints'
# Similarity score above which a single comparison counts as a match
# (see the siamese-network comparison in voice_verify).
threshold = 0.7

os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(PROCESSED_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# NOTE(review): hard-coded absolute Windows path — should come from config/env.
model_path = r"C:\Users\xendx\Desktop\final_model"
# SECURITY: torch.load unpickles arbitrary objects — only load trusted files.
# NOTE(review): no map_location given; loading a GPU-saved checkpoint on a
# CPU-only host will fail — consider torch.load(model_path, map_location='cpu').
model = torch.load(model_path)
model.eval()  # inference mode: disables dropout / batch-norm updates

def create_response(message, status, http_status):
    """Build the app's uniform JSON response: ({'status', 'message'}, HTTP code)."""
    payload = {'status': status, 'message': message}
    return jsonify(payload), http_status

@app.route('/api/register', methods=['POST'])
def register():
    """Register a new user by phone number and password.

    Returns status 1 on success, 2 if the user already exists,
    3 on missing/invalid input.

    SECURITY NOTE(review): passwords are stored in plaintext; hashing
    (e.g. werkzeug.security.generate_password_hash) requires changing
    login() in lockstep, so it is only flagged here.
    """
    # silent=True: a missing/invalid JSON body becomes {} instead of a 500.
    data = request.get_json(silent=True) or {}
    phonenum = data.get('phonenum')
    password = data.get('password')

    if not phonenum or not password:
        return create_response('手机号或密码格式不正确', 3, 400)

    conn = get_db_connection()
    try:
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT * FROM User WHERE userId = %s", (phonenum,))
            if cursor.fetchone():
                # Bug fix: the original leaked the connection/cursor on this
                # early return; try/finally now guarantees cleanup.
                return create_response('用户已注册', 2, 400)

            cursor.execute(
                "INSERT INTO User (userId, password, voiceOK) VALUES (%s, %s, %s)",
                (phonenum, password, False),
            )
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()
    return create_response('注册成功', 1, 200)

@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate a user and issue a session token.

    status 1: logged in, no voiceprint enrolled yet;
    status 2: logged in, voiceprint already enrolled;
    status 3: bad credentials or malformed input.

    SECURITY NOTE(review): credentials are matched in plaintext SQL —
    see the matching note on register().
    """
    payload = request.get_json()
    phonenum = payload.get('phonenum')
    password = payload.get('password')

    if not (phonenum and password):
        return create_response('手机号或密码格式不正确', 3, 400)

    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute(
        "SELECT * FROM User WHERE userId = %s AND password = %s",
        (phonenum, password),
    )
    row = cursor.fetchone()
    cursor.close()
    conn.close()

    if row is None:
        return create_response('手机号或密码不正确', 3, 400)

    token = generate_token(phonenum)
    # Column 2 is voiceOK — matches the INSERT column order in register().
    has_voiceprint = row[2]

    if has_voiceprint:
        body = {'status': 2, 'message': '登录成功，用户已录入声纹信息', 'token': token}
    else:
        body = {'status': 1, 'message': '登录成功，用户未录入声纹信息', 'token': token}
    return jsonify(body), 200

@app.route('/api/app/voiceClear', methods=['POST'])
def voice_clear():
    """Delete the caller's stored voiceprint vectors and reset voiceOK.

    Returns status 1 on success, 2 (HTTP 401) on an invalid session.
    """
    # Bug fix: a missing or malformed Authorization header used to raise
    # AttributeError/IndexError (HTTP 500); treat it as an invalid session.
    auth_parts = request.headers.get('Authorization', '').split(' ')
    user_id = decode_token(auth_parts[1]) if len(auth_parts) > 1 else None

    if not user_id:
        return create_response('登录状态无效', 2, 401)

    user_folder = os.path.join(PROCESSED_FOLDER, user_id)
    # isdir (not exists): rmtree would raise on a plain file at this path.
    if os.path.isdir(user_folder):
        shutil.rmtree(user_folder)

    conn = get_db_connection()
    try:
        cursor = conn.cursor()
        try:
            cursor.execute("UPDATE User SET voiceOK = %s WHERE userId = %s",
                           (False, user_id))
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()

    return create_response('清除声纹信息成功', 1, 200)

@app.route('/api/app/voiceEntry', methods=['POST'])
def voice_entry():
    """Enroll the caller's voiceprint from an uploaded WAV file.

    Expects a multipart 'audio' file. The audio is split into segments,
    features are extracted, 5 random feature samples are embedded with the
    model's feature extractor, and the 5 vectors are saved as .npy files
    under voiceprints/<user_id>/. On success, voiceOK is set in the DB.

    Returns status 1 on success, 2 on invalid session (401) or bad
    audio input (400).
    """
    # Bug fix: missing/malformed Authorization header now yields 401, not 500.
    auth_parts = request.headers.get('Authorization', '').split(' ')
    user_id = decode_token(auth_parts[1]) if len(auth_parts) > 1 else None
    if not user_id:
        return create_response('登录状态无效', 2, 401)

    if 'audio' not in request.files:
        return create_response('未检测到符合要求的音频输入', 2, 400)

    audio = request.files['audio']
    try:
        # Normalize the upload to standard WAV entirely in memory.
        # (Dropped the original's unused './temp.wav' debug export.)
        audio_segment = AudioSegment.from_file(io.BytesIO(audio.read()),
                                               format='wav')
        audio_buffer = io.BytesIO()
        audio_segment.export(audio_buffer, format='wav')
        audio_buffer.seek(0)

        # sr=None: keep the file's native sample rate.
        y, sr = librosa.load(audio_buffer, sr=None)

        # split_audio / extract_features come from the lib.* star imports.
        audio_segments = split_audio(y, sr)
        if len(audio_segments) < 5:
            raise ValueError('未得到足够的音频输入')

        features = []
        for segment in audio_segments:
            features.extend(extract_features(segment, sr))

        # Bug fix: random.sample raises if fewer than 5 features came back,
        # even when there were >= 5 segments.
        if len(features) < 5:
            raise ValueError('未得到足够的音频输入')

        selected_features = random.sample(features, 5)
        # Shape: (5, 1, *feature_dims) — channel dim added per sample.
        batch = torch.stack(
            [torch.tensor(f).float().unsqueeze(0) for f in selected_features]
        )

        # assumes the loaded model exposes a feature_extractor sub-module.
        with torch.no_grad():
            vectors = model.feature_extractor(batch)

        user_folder = os.path.join(PROCESSED_FOLDER, user_id)
        os.makedirs(user_folder, exist_ok=True)
        for index, vector in enumerate(vectors):
            np.save(os.path.join(user_folder, f"{index}.npy"), vector.numpy())

        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            try:
                cursor.execute("UPDATE User SET voiceOK = %s WHERE userId = %s",
                               (True, user_id))
                conn.commit()
            finally:
                cursor.close()
        finally:
            conn.close()

        return create_response('声纹注册成功', 1, 200)

    except Exception as e:
        # Broad catch is deliberate: any decode/processing failure maps to a
        # generic "bad audio" response. TODO: switch print to logging.
        print(f"Error processing audio: {e}")
        return create_response('未检测到符合要求的音频输入', 2, 400)

@app.route('/api/app/voiceVerify', methods=['POST'])
def voice_verify():
    """Verify an uploaded voice sample against the caller's stored voiceprint.

    One feature sample is extracted from the upload, embedded with the
    model, and compared against every saved vector via the siamese
    network. Verification succeeds when at least 3 comparisons exceed
    `threshold` (enrollment stores 5 vectors, so this is a 3-of-5 vote).

    Returns status 1 on success, 2 on failure / invalid session / bad audio.
    """
    # Bug fix: missing/malformed Authorization header now yields 401, not 500.
    auth_parts = request.headers.get('Authorization', '').split(' ')
    user_id = decode_token(auth_parts[1]) if len(auth_parts) > 1 else None
    if not user_id:
        return create_response('登录状态无效', 2, 401)

    if 'audio' not in request.files:
        return create_response('未检测到符合要求的音频输入', 2, 400)

    audio = request.files['audio']
    try:
        # Normalize the upload to standard WAV entirely in memory.
        # (Dropped the original's unused './temp.wav' debug export.)
        audio_segment = AudioSegment.from_file(io.BytesIO(audio.read()),
                                               format='wav')
        audio_buffer = io.BytesIO()
        audio_segment.export(audio_buffer, format='wav')
        audio_buffer.seek(0)

        y, sr = librosa.load(audio_buffer, sr=None)

        audio_segments = split_audio(y, sr)
        if len(audio_segments) < 1:
            raise ValueError('未得到足够的音频输入')

        features = []
        for segment in audio_segments:
            features.extend(extract_features(segment, sr))
        # Bug fix: random.choice raises IndexError on an empty feature list.
        if not features:
            raise ValueError('未得到足够的音频输入')

        # (1, 1, *feature_dims): batch and channel dims for the extractor.
        selected_tensor = (
            torch.tensor(random.choice(features)).float().unsqueeze(0).unsqueeze(0)
        )
        with torch.no_grad():
            selected_vector = model.feature_extractor(selected_tensor).squeeze(0)

        user_folder = os.path.join(PROCESSED_FOLDER, user_id)
        if not os.path.isdir(user_folder):
            raise ValueError('未找到声纹信息')

        saved_vectors = [
            torch.tensor(np.load(os.path.join(user_folder, filename)))
            for filename in os.listdir(user_folder)
            if filename.endswith(".npy")
        ]
        if not saved_vectors:
            raise ValueError('未找到声纹信息')

        positive_count = 0
        with torch.no_grad():
            for saved_vector in saved_vectors:
                similarity = model.siamese_network(
                    selected_vector.unsqueeze(0), saved_vector.unsqueeze(0)
                )
                if similarity > threshold:
                    positive_count += 1

        if positive_count >= 3:
            return create_response('验证成功', 1, 200)
        return create_response('验证失败', 2, 200)

    except Exception as e:
        # NOTE(review): "no voiceprint on file" also falls through to this
        # generic "bad audio" message — preserved for client compatibility.
        print(f"Error verifying audio: {e}")
        return create_response('未检测到符合要求的音频输入', 2, 400)

if __name__ == '__main__':
    # SECURITY: debug=True enables the Werkzeug interactive debugger (remote
    # code execution) — development only; deploy behind a real WSGI server.
    app.run(debug=True)
