import librosa
import numpy as np
from requests import Response
from datetime import datetime
import json
import logging
import os
from keras.models import load_model
from .disease_response import  DiseaseResponse
from django.conf import settings

# --- Module-level globals ---
logger = logging.getLogger(__name__)
# model = load_model(r"./model/SE_DCBLSTMNet_modelcheckpoint_cough_breath_speech.h5")
# Keras model loaded once at import time; path is relative to the process
# working directory -- NOTE(review): confirm it resolves under the app server.
model = load_model(r"./model/GAMMNet-v1.h5")
disease_name = ["健康", "哮喘", "新冠肺炎", "常见感冒"]  # labels matching the model's output order (healthy, asthma, COVID-19, common cold)
# Parent of this file's directory (the app root).
current_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Build the disease-info JSON path. NOTE: for hot-pluggable apps, each app keeps
# its own static/ and templates/ folders; shared assets live in the project-level ones.
disease_json_file_path = os.path.join(
    current_directory, "static", "json", "disease.json"
)

def normalization(wav_data, target_length=110500):
    """Length-normalize an audio sample array.

    Pads with trailing zeros, or truncates, so the result has exactly
    ``target_length`` samples (default 110500, ~5 s at 22050 Hz).

    Args:
        wav_data (np.ndarray): 1-D array of audio samples, any length.
        target_length (int): desired number of samples.

    Returns:
        np.ndarray: array of exactly ``target_length`` samples.
    """
    wav_data = np.asarray(wav_data)
    if len(wav_data) >= target_length:
        return wav_data[:target_length]
    # Zero-pad at the end in one vectorized call instead of appending to a
    # Python list one element at a time (the old loop was quadratic).
    return np.pad(wav_data, (0, target_length - len(wav_data)))

def process_audio_files(request):
    """Extract MFCC features from the uploaded cough/breath/speech recordings.

    Args:
        request: Django HTTP request. ``request.FILES`` may contain
            "cough_file", "breath_file" and "speech_file" in-memory uploads
            (InMemoryUploadedFile objects).

    Returns:
        np.ndarray: model-ready tensor from prepare_model_input(), or a
            ``Response`` with status 400 when an upload cannot be decoded.
    """
    SAMPLE_LENGTH = 110500  # ~5 s at 22050 Hz
    SAMPLE_RATE = 22050
    N_MFCC = 40

    user_data = []
    audio_types = ["cough_file", "breath_file", "speech_file"]

    # FILES holds in-memory uploads, e.g.
    # {'cough_file': [<InMemoryUploadedFile: cough-shallow.wav (audio/wav)>], ...}
    logger.info(request.FILES)

    for audio_type in audio_types:
        # A missing upload is represented by silence so the model input
        # always contains all three channels.
        audio_data = np.zeros(SAMPLE_LENGTH)
        file_obj = request.FILES.get(audio_type)

        if not file_obj:
            user_data.append(process_empty_audio(audio_data, SAMPLE_RATE, N_MFCC))
            continue

        try:
            # Persist the upload for offline testing BEFORE librosa consumes
            # the in-memory stream (the `with` inside process_audio_file
            # closes it).
            save_test_audio(file_obj, audio_type)
            audio_data = process_audio_file(file_obj, SAMPLE_LENGTH)
        except Exception as e:
            if "LibsndfileError" in str(e):
                # NOTE(review): `Response` is imported from `requests`, whose
                # constructor takes no such arguments -- this looks like it
                # should be rest_framework.response.Response; confirm against
                # the view layer.
                return Response(
                    {"error": "音频文件格式错误，请确认录音参数"},
                    status=400
                )
            # Previously any other error was silently swallowed, leaving the
            # channel as silence. Keep that best-effort fallback but log it
            # so failures are visible.
            logger.exception("failed to process %s: %s", audio_type, e)

        user_data.append(extract_mfcc_features(audio_data, SAMPLE_RATE, N_MFCC))

    return prepare_model_input(np.array(user_data))

def process_empty_audio(audio_data, sample_rate, n_mfcc):
    """Return MFCC features for a silent placeholder channel.

    Kept as a separate name so the missing-upload path reads clearly at the
    call site, but delegates to the shared extractor instead of duplicating
    the identical librosa call (the two bodies had drifted into copy-paste).
    """
    return extract_mfcc_features(audio_data, sample_rate, n_mfcc)

def process_audio_file(file_obj, sample_length):
    """Decode an uploaded audio file and length-normalize its samples.

    Args:
        file_obj: uploaded file object exposing ``.name`` and ``.open()``.
        sample_length: currently unused; normalization() applies its own
            fixed target length.

    Returns:
        np.ndarray: decoded samples, padded/truncated by normalization().
    """
    # Strip WeChat mini-program upload-parameter pollution from the name,
    # e.g. "xxx.durationTime=2.5.wav".
    marker = ".durationTime="
    if marker in file_obj.name:
        file_obj.name = file_obj.name.replace(marker, "")

    with file_obj.open("rb") as audio_stream:
        samples, _sr = librosa.load(audio_stream)
    return normalization(samples)

def extract_mfcc_features(audio_data, sample_rate, n_mfcc):
    """Compute the MFCC feature matrix for one audio channel.

    Args:
        audio_data (np.ndarray): 1-D array of audio samples.
        sample_rate (int): sampling rate in Hz.
        n_mfcc (int): number of MFCC coefficients to extract.

    Returns:
        np.ndarray: MFCC matrix produced by librosa.
    """
    samples = audio_data.astype(np.float32)  # librosa expects float input
    mfcc_matrix = librosa.feature.mfcc(y=samples, sr=sample_rate, n_mfcc=n_mfcc)
    return np.array(mfcc_matrix)

def save_test_audio(file_obj, audio_type):
    """Persist an uploaded audio file under <BASE_DIR>/test/api_upload_audio for offline testing.

    Args:
        file_obj: uploaded file object (InMemoryUploadedFile).
        audio_type: logical upload slot name, e.g. "cough_file".
    """
    save_dir = os.path.join(settings.BASE_DIR, "test", "api_upload_audio")
    # open() below would raise FileNotFoundError on a fresh checkout where
    # the directory does not exist yet.
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(
        save_dir,
        # BUG FIX: the original used file_obj.name[-13] -- a single character,
        # which discarded the file extension. [-13:] keeps the filename tail
        # (13 chars) as presumably intended -- TODO confirm intended length.
        datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + audio_type + "_" + file_obj.name[-13:],
    )
    # InMemoryUploadedFile lives in memory, not on disk; copy it out
    # chunk-by-chunk while the upload is still open.
    if not file_obj.closed:
        with open(save_path, "wb+") as destination:
            for chunk in file_obj.chunks():
                destination.write(chunk)

def prepare_model_input(user_data):
    """Reorder stacked MFCC features into the layout the model expects.

    Args:
        user_data (np.ndarray): 3-D array of per-channel MFCC matrices.

    Returns:
        np.ndarray: the input with its last two axes swapped and a leading
            batch dimension of size 1 added.
    """
    # Swap the last two axes, then prepend the batch axis.
    swapped = user_data.transpose(0, 2, 1)
    return swapped[np.newaxis, ...]


def get_prediction_dict(user_data):
    """Run the model on prepared features and assemble the diagnosis payload.

    Args:
        user_data (np.ndarray): model input from prepare_model_input().

    Returns:
        A DiseaseResponse payload: a specific-disease response when the model
        is confident, otherwise an "other disease" response.
    """
    y_pred = model.predict(user_data)
    y_label = np.argmax(y_pred)
    logger.info("y_pred: %s", y_pred)  # lazy formatting; no work unless logged
    # Convert probabilities to percentages (two decimals). tolist() yields
    # plain floats so the result is JSON-serializable (numpy scalars are not).
    format_confidence = [round(p * 100, 2) for p in y_pred[0].tolist()]
    format_dict = dict(zip(disease_name, format_confidence))
    with open(
        disease_json_file_path,
        "r",
        encoding="utf-8",
    ) as f:
        disease_data = json.load(f)

    # Below the 0.8 confidence threshold, report "other disease" rather than
    # committing to a specific label. (The original comment claimed the
    # opposite condition; the code's `< 0.8` branch is the low-confidence one.)
    if max(y_pred[0]) < 0.8:
        return DiseaseResponse.create_other_disease(format_dict)

    info = disease_data[disease_name[y_label]]  # hoist the repeated lookup
    return DiseaseResponse.create_response(
        diagnosis_result=disease_name[y_label],
        prediction_rate=format_dict,
        introduction=info.get("introduction"),
        img_url=info.get("img_url"),
        treatment=info["treatment"],
        # diagnosis_datetime is defaulted by DiseaseResponse itself.
    )

