import json
import logging
import os
import tempfile
import wave

from flask import Flask, request, jsonify
from vosk import Model, KaldiRecognizer

from audio_processing import convert_webm_to_wav
from feedback_generation import evaluate_pronunciation
from text_processing import normalize_text

app = Flask(__name__)

# 设置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Vosk 模型路径
model = Model(r"vosk-model-en-us-0.22")

def recognize_speech(audio_path):
    """Vosk语音识别函数"""
    wf = wave.open(audio_path, "rb")
    recognizer = KaldiRecognizer(model, wf.getframerate())
    recognizer.SetWords(True)  # 启用逐字输出

    result = ""
    while True:
        data = wf.readframes(4000)
        if len(data) == 0:
            break
        if recognizer.AcceptWaveform(data):
            result += recognizer.Result()  # 完整的识别结果
        else:
            result += recognizer.PartialResult()  # 部分结果

    final_result = recognizer.FinalResult()  # 获取最终结果
    final_result_dict = json.loads(final_result)
    return final_result_dict.get('text', '')  # 返回识别文本

@app.route('/recognize', methods=['POST'])
def recognize():
    """处理用户上传的音频文件并返回反馈"""
    if 'file' not in request.files:
        logger.error('No file part in the request')
        return jsonify({'error': 'No file part in the request'}), 400

    audio_file = request.files['file']
    reference_text = request.form.get('reference_text', '')

    if audio_file.filename == '':
        logger.error('No selected file')
        return jsonify({'error': 'No selected file'}), 400

    try:
        # 保存上传的音频文件并转换格式
        temp_webm_path = 'temp_audio.webm'
        temp_wav_path = 'temp_audio.wav'
        audio_file.save(temp_webm_path)

        # 转换为wav格式
        convert_webm_to_wav(temp_webm_path, temp_wav_path)

        # 使用Vosk进行语音识别
        recognized_text = recognize_speech(temp_wav_path)

        # 文本规范化
        normalized_user_text = normalize_text(recognized_text)
        normalized_reference_text = normalize_text(reference_text)

        # 评估发音并获取反馈
        evaluation_result = evaluate_pronunciation(normalized_user_text, normalized_reference_text, temp_wav_path)

        # 将反馈列表转换为字符串
        feedback_str = '\n'.join(evaluation_result['feedback'])

        # 返回识别文本和反馈
        response = {
            'feedback': feedback_str,
            'score': evaluation_result['score'],
            'textMatchScore': evaluation_result['text_match_score'],
            'pitchScore': evaluation_result['pitch_score'],
            'intensityScore': evaluation_result['intensity_score'],
            'speechRateScore': evaluation_result['speech_rate_score']
        }
        # 打印详细的日志信息
        logger.info(f"Feedback: {response['feedback']}, Score: {response['score']}, "
            f"Text Match Score: {response['textMatchScore']}, Pitch Score: {response['pitchScore']}, "
            f"Intensity Score: {response['intensityScore']}, Speech Rate Score: {response['speechRateScore']}")
        return jsonify(response)

    except Exception as e:
        logger.error(f"Error occurred: {e}")
        return jsonify({'error': str(e)}), 500
    finally:
        # 清理临时文件
        if os.path.exists(temp_webm_path):
            os.remove(temp_webm_path)
        if os.path.exists(temp_wav_path):
            os.remove(temp_wav_path)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
