import os
import json
import torch
import warnings
import argparse
import numpy as np
from collections import defaultdict
from typing import Dict
import torchvision.models
import pandas as pd
from torchvision.models import ResNet101_Weights
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score

from train_eval_fund_2 import TrainerDR
from train import DeepSurModel
from config import TrainerConfig

# Inference device: second GPU when CUDA is available, otherwise CPU.
device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
# Accumulates per-batch outputs across one prediction run; flushed and
# cleared by merge_result().
result_cache = defaultdict(list)
# Display names (Chinese label + letter code) for the 8 classes, index-aligned
# with the model's 8-dim multi-label output: Normal, Diabetes, Glaucoma,
# Cataract, AMD, Hypertension, Myopia, Other.
class_name = ['正常_N', '糖尿病_D', '青光眼_G', '白内障_C', 'AMD_A', '高血压_H', '近视_M', '其他_O']

# Single-eye prediction: run one batch through the model and package the outputs.
def batch(model, data) -> dict:
    """Run one single-eye batch.

    Returns a dict with the hierarchical 0/1 'predictions', the squeezed
    float 'labels', and the batch's 'filenames'.
    """
    images = data['image'].to(device)
    targets = data['labels'].to(device).float().squeeze(1)
    binary_logits, disease_logits = model(images)
    return {
        'predictions': hierarchical_predict(binary_logits, disease_logits),
        'labels': targets,
        'filenames': data['filename'],
    }

# Dual-eye prediction: predict each eye separately, then logically merge the
# two single-eye results into one patient-level prediction.
def batch_formal(model, data) -> dict:
    """Run one dual-eye batch.

    Args:
        model: network returning (binary_logits, disease_logits) for an image batch.
        data: dict with 'left_image', 'right_image', 'labels',
              'left_filename', 'right_filename'.

    Returns:
        dict with merged 'predictions', 'labels', and both filename lists.
    """
    left_imgs = data['left_image'].to(device)
    right_imgs = data['right_image'].to(device)
    labels = data['labels'].to(device)
    left_filenames = data['left_filename']
    right_filenames = data['right_filename']

    labels = labels.squeeze(1)
    binary_logits_l, disease_logits_l = model(left_imgs)
    left_pred = hierarchical_predict(binary_logits_l, disease_logits_l)
    # BUG FIX: this call previously re-used left_imgs, so the "dual-eye"
    # result was just the left eye merged with itself.
    binary_logits_r, disease_logits_r = model(right_imgs)
    right_pred = hierarchical_predict(binary_logits_r, disease_logits_r)
    predictions = merge_predicitons(left_pred, right_pred)
    return dict(
        predictions=predictions,
        labels=labels,
        left_filenames=left_filenames,
        right_filenames=right_filenames
    )

def merge_predicitons(left, right):
    """Merge per-eye multi-label predictions into one patient-level vector.

    Column 0 ("normal") holds only when BOTH eyes look normal (logical AND);
    each disease column 1..N holds if EITHER eye shows it (logical OR).
    Returns an int64 tensor of the same shape as the inputs.
    """
    left_i = left.to(torch.int64)
    right_i = right.to(torch.int64)
    normal = left_i[:, :1] & right_i[:, :1]
    diseases = left_i[:, 1:] | right_i[:, 1:]
    return torch.cat((normal, diseases), dim=1)

# Produce the final 8-class multi-label prediction from the two model heads.
def hierarchical_predict(binary_pred, disease_pred, threshold=0.5):
    """Two-stage decision: normal-vs-abnormal first, then per-disease labels.

    Args:
        binary_pred: (B, 1) scores of the "normal" head. NOTE(review): these
            are compared directly against `threshold`, so they are assumed to
            be probabilities, not raw logits — confirm upstream.
        disease_pred: (B, 7) scores of the seven disease heads.
        threshold: decision threshold applied to both heads.

    Returns:
        (B, 8) float tensor of 0/1 labels: column 0 is "normal"; columns 1-7
        are the diseases and are only set for abnormal samples.
    """
    # as_tensor avoids the copy + UserWarning that torch.tensor() emits when
    # handed an existing tensor, while still accepting array-like input.
    binary_pred = torch.as_tensor(binary_pred, dtype=torch.float32)
    disease_pred = torch.as_tensor(disease_pred, dtype=torch.float32)

    # Allocate on the inputs' device (previously the global `device`, which
    # broke whenever the inputs lived elsewhere).
    final_pred = torch.zeros(binary_pred.shape[0], 8, device=binary_pred.device)

    # squeeze(-1), not squeeze(): a bare squeeze() collapsed a batch of one
    # to a 0-d mask and broke the fancy indexing below.
    normal_mask = (binary_pred >= threshold).squeeze(-1)
    final_pred[normal_mask, 0] = 1

    # Abnormal samples: threshold each disease head independently (multi-label).
    abnormal_mask = ~normal_mask
    if abnormal_mask.sum() > 0:
        disease_labels = (disease_pred[abnormal_mask] >= threshold).float()
        final_pred[abnormal_mask, 1:] = disease_labels

    return final_pred

def collect_result(output: Dict):
    """Append one batch's outputs to the module-level result_cache.

    Tensors are detached and moved to CPU before caching; filename lists are
    flattened with extend so each cache entry stays one flat list. Anything
    else only triggers a warning and is dropped.
    """
    for k, v in output.items():
        if isinstance(v, torch.Tensor):
            result_cache[k].append(v.detach().cpu())
        elif isinstance(v, list):
            # extend, not append: keeps a flat list instead of a list of lists
            result_cache[k].extend(v)
        else:
            print(f"警告: collect_result 遇到未知类型 key='{k}', type='{type(v)}'")

def merge_result():
    """Flatten result_cache into one dict and reset the cache.

    Lists of 0-d tensors are stacked, lists of batched tensors concatenated,
    and string lists (filenames) are passed through as-is.
    """
    collected = {}
    for key, values in result_cache.items():
        if not values:
            continue
        first = values[0]
        if isinstance(first, torch.Tensor):
            collected[key] = torch.stack(values) if first.dim() == 0 else torch.cat(values)
        elif isinstance(first, str):
            collected[key] = values
    result_cache.clear()
    return collected

def metrics(data, num_classes=8) -> dict:
    """Compute overall and per-class metrics for multi-label predictions.

    Args:
        data: dict with 'predictions' and 'labels', both (B, num_classes) tensors.
        num_classes: number of label columns (default 8).

    Returns:
        dict with overall subset accuracy, macro precision/recall/AUC, and a
        per-class table of positive count, accuracy, precision and recall.
    """
    # Suppress sklearn's UndefinedMetricWarning chatter for degenerate classes.
    warnings.filterwarnings("ignore", category=UserWarning, module="sklearn.metrics._classification")

    logits = data['predictions'].cpu().numpy()
    # NOTE(review): when predictions come from hierarchical_predict they are
    # already 0/1, so this sigmoid maps them to {0.5, 0.731}; the strict '>'
    # threshold below still recovers the same 0/1 labels, but the AUC is then
    # computed on near-binary scores — confirm this is intended.
    probabilities = torch.sigmoid(torch.tensor(logits)).numpy()

    threshold = 0.5
    predictions = (probabilities > threshold).astype(int)

    labels = data['labels'].cpu().numpy()
    # Per-class accuracy/precision/recall plus positive-sample counts.
    class_matrix = {}
    for i in range(num_classes):
        # Slice out the current class's predicted and true labels.
        class_preds = predictions[:, i]
        class_labels = labels[:, i]

        accuracy = accuracy_score(class_labels, class_preds)
        # zero_division=0 makes the undefined-metric fallback explicit instead
        # of relying solely on the warning filter above (values unchanged:
        # sklearn already returns 0 in that case).
        precision = precision_score(class_labels, class_preds, zero_division=0)
        recall = recall_score(class_labels, class_preds, zero_division=0)
        # Number of positive samples for this class.
        num = np.sum(class_labels)
        class_matrix[class_name[i]] = {
            "number" : int(num),
            "accuracy" : accuracy,
            "precision" : precision,
            "recall" : recall,
        }

    # Overall metrics: subset accuracy + macro-averaged precision/recall/AUC.
    acc = accuracy_score(labels, predictions)
    precision = precision_score(labels, predictions, average='macro', zero_division=0)
    recall = recall_score(labels, predictions, average='macro', zero_division=0)
    auc = roc_auc_score(labels, probabilities, average='macro')

    result = dict(
        accuracy=acc,
        precision=precision,
        recall=recall,
        auc=auc,
        class_matrix=class_matrix
    )
    return result

# Run inference over a dataloader and return the merged, concatenated results.
def predict(model, dataset, mode):
    """Predict over `dataset` with `model` in eval mode, without gradients.

    Args:
        model: the trained network.
        dataset: iterable of batches (a DataLoader).
        mode: 'formal test single' or 'formal test double'.

    Returns:
        dict of concatenated predictions/labels/filenames from merge_result().

    Raises:
        ValueError: if `mode` is not one of the two supported strings.
    """
    model.eval()
    model.to(device)
    print("Predicting...")
    with torch.no_grad():
        if mode == 'formal test double':
            for i_batch, data in enumerate(dataset):
                output = batch_formal(model=model,data=data)
                collect_result(output)  
            print('Double eyes test finished')
        elif mode == 'formal test single':
            for i_batch, data in enumerate(dataset):
                output = batch(model=model,data=data)
                collect_result(output)      
            print('Single eye test finished')
        else:
            # Fail loudly with context instead of a bare `raise Exception`.
            raise ValueError(f"Unsupported predict mode: {mode!r}")
    merged_output = merge_result()
    return merged_output

if __name__ == '__main__':
    cfg = TrainerConfig()

    # Build the model, swap in an ImageNet-pretrained ResNet-101 backbone
    # (minus its final FC layer), then load the fine-tuned checkpoint weights.
    model = DeepSurModel(backbone=cfg.model).to(device)

    weights = ResNet101_Weights.DEFAULT
    base_backbone_full = torchvision.models.resnet101(weights=weights)
    modules = list(base_backbone_full.children())[:-1] 
    model.cnn.backbone = torch.nn.Sequential(*modules)
    print('Load checkpoint')
    checkpoint = torch.load(cfg.model_path, map_location=device, weights_only=False)
    model.load_state_dict(checkpoint['model_state_dict'])
    
    # Data loaders for the single-eye and dual-eye formal test sets.
    trainer = TrainerDR()
    test_single_dataloader = trainer.formal_test_single_loader
    test_double_dataloader = trainer.formal_test_double_loader

    # Where the metric JSON files go.
    output_dir = os.path.join('predict_result', cfg.result_path)
    output_file_path1 = f'{cfg.result_path}_single.json'
    output_file_path2 = f'{cfg.result_path}_double.json'
    os.makedirs(output_dir, exist_ok=True)

    # Existing CSV whose N..O columns will be overwritten with the dual-eye
    # predictions. NOTE(review): hardcoded absolute path — adjust per machine.
    prediction_csv_path = '/home/zhangyichi/dataset/OIA-ODIR/validation/validation.csv' # <--- !!! 修改为你现有的 CSV 文件路径 !!!

    # NOTE(review): these handles are only closed at the very end; an exception
    # in between would leak them — consider `with` blocks in a future cleanup.
    output_file1 = open(os.path.join(output_dir, output_file_path1), 'w', encoding='utf-8')
    output_file2 = open(os.path.join(output_dir, output_file_path2), 'w', encoding='utf-8')

    # Single-eye test: predict, score, dump metrics as one JSON line.
    output1 = predict(model, test_single_dataloader, mode='formal test single')
    metrics1 = metrics(data=output1)
    print(json.dumps(metrics1, ensure_ascii=False), file=output_file1)
    output_file1.flush()

    # Dual-eye test.
    output2 = predict(model, test_double_dataloader, mode='formal test double')
    metrics2 = metrics(data=output2)
    print(json.dumps(metrics2, ensure_ascii=False), file=output_file2)
    output_file2.flush()

    # --- Record per-sample prediction errors to TXT files ---
    print("\n开始记录预测错误的样本...")

    single_error_details = [] # detail lines for single-eye mistakes
    if 'predictions' in output1 and 'labels' in output1 and 'filenames' in output1:
        # output1['predictions'] comes from hierarchical_predict (already 0/1),
        # but re-binarize via sigmoid + 0.5 to stay consistent with metrics()
        # (sigmoid(1) > 0.5, sigmoid(0) == 0.5, so 0/1 survive the round trip).
        preds1_probs = torch.sigmoid(torch.tensor(output1['predictions'].cpu().numpy())).float()
        preds1 = (preds1_probs > 0.5).int() # binarized predictions

        labels1 = output1['labels'].int()
        filenames1 = output1['filenames'] # flat list produced by merge_result

        if len(preds1) == len(labels1) == len(filenames1):
            for i in range(len(preds1)):
                # A sample counts as an error if ANY label bit differs.
                if torch.any(preds1[i] != labels1[i]):
                    pred_list = preds1[i].tolist()
                    label_list = labels1[i].tolist()
                    filename = filenames1[i]
                    # BUG FIX: previously hardcoded "File: (unknown)" and never
                    # used the filename looked up above.
                    error_line = f"File: {filename} Prediction: {pred_list} Label: {label_list}"
                    single_error_details.append(error_line)
        else:
             print(f"警告：单眼预测结果({len(preds1)})、标签({len(labels1)})和文件名({len(filenames1)})长度不匹配，无法准确记录错误文件。")
    
    double_error_details = [] # detail lines for dual-eye mistakes
    if 'predictions' in output2 and 'labels' in output2 and 'left_filenames' in output2 and 'right_filenames' in output2:
        # Same binarization as above, applied to the merged dual-eye output.
        preds2_probs = torch.sigmoid(torch.tensor(output2['predictions'].cpu().numpy())).float()
        preds2 = (preds2_probs > 0.5).int() # binarized predictions

        labels2 = output2['labels'].int()
        left_filenames2 = output2['left_filenames']   # flat list from merge_result
        right_filenames2 = output2['right_filenames'] # flat list from merge_result

        if len(preds2) == len(labels2) == len(left_filenames2) == len(right_filenames2):
            for i in range(len(preds2)):
                if torch.any(preds2[i] != labels2[i]):
                    pred_list = preds2[i].tolist()
                    label_list = labels2[i].tolist()
                    left_fn = left_filenames2[i]
                    right_fn = right_filenames2[i]
                    error_line = f"Left: {left_fn} Right: {right_fn} Prediction: {pred_list} Label: {label_list}"
                    double_error_details.append(error_line)
        else:
            print(f"警告：双眼预测结果({len(preds2)})、标签({len(labels2)})和文件名({len(left_filenames2)}/{len(right_filenames2)})长度不匹配，无法准确记录错误文件。")

    # Write the error details; failures to write are reported, not fatal.
    single_error_txt_path = os.path.join(output_dir, f'{cfg.result_path}_single_errors.txt')
    try:
        with open(single_error_txt_path, 'w', encoding='utf-8') as f_err1:
            for error_line in single_error_details:
                f_err1.write(f"{error_line}\n")
        print(f"Single eye error details saved in {single_error_txt_path} ({len(single_error_details)} errors)")
    except IOError as e:
        print(f"错误：无法写入单眼错误文件 '{single_error_txt_path}'. 原因: {e}")

    double_error_txt_path = os.path.join(output_dir, f'{cfg.result_path}_double_errors.txt')
    try:
        with open(double_error_txt_path, 'w', encoding='utf-8') as f_err2:
            for error_line in double_error_details:
                f_err2.write(f"{error_line}\n")
        print(f"Double eye error details saved in {double_error_txt_path} ({len(double_error_details)} errors)")
    except IOError as e:
        print(f"错误：无法写入双眼错误文件 '{double_error_txt_path}'. 原因: {e}")

    # --- Write the dual-eye predictions back into the existing CSV ---
    print(f"\n开始将双眼预测结果更新到 CSV 文件: {prediction_csv_path}...")
    label_columns_csv = ['N', 'D', 'G', 'C', 'A', 'H', 'M', 'O'] # target columns in the CSV

    if not os.path.exists(prediction_csv_path):
        print(f"错误：找不到要更新的现有 CSV 文件：{prediction_csv_path}")
    # Make sure output2 carries everything the CSV update needs.
    elif not ('predictions' in output2 and 'left_filenames' in output2 and 'right_filenames' in output2):
         print("错误：双眼预测输出 'output2' 中缺少必要信息 ('predictions', 'left_filenames', 'right_filenames')，无法更新CSV。")
    else:
        try:
            # 1. Load the existing CSV.
            df_target = pd.read_csv(prediction_csv_path)
            print(f"成功加载现有 CSV: {prediction_csv_path}")

            # 2. Binarize the predictions (same scheme as the error report
            #    above) and build a left-filename -> prediction lookup.
            preds2_probs = torch.sigmoid(torch.tensor(output2['predictions'].cpu().numpy())).float()
            preds2_binary = (preds2_probs > 0.5).int()
            left_filenames_from_output = output2['left_filenames']

            prediction_map = {}
            if len(preds2_binary) == len(left_filenames_from_output):
                for i in range(len(preds2_binary)):
                    prediction_map[left_filenames_from_output[i]] = preds2_binary[i].tolist()
            else:
                print(f"内部警告：预测结果({len(preds2_binary)})与文件名({len(left_filenames_from_output)})数量不匹配。")

            # 3. Walk the DataFrame, matching rows by 'Left-Fundus' and
            #    overwriting the N..O columns where a prediction exists.
            update_count = 0
            rows_not_found_count = 0
            target_columns_exist = all(col in df_target.columns for col in ['Left-Fundus'] + label_columns_csv)

            if not target_columns_exist:
                 missing = [col for col in ['Left-Fundus'] + label_columns_csv if col not in df_target.columns]
                 raise KeyError(f"目标 CSV 文件缺少必需列: {missing}")

            for idx in df_target.index:
                current_left_fn = df_target.loc[idx, 'Left-Fundus']
                prediction = prediction_map.get(current_left_fn) # .get for a safe lookup

                if prediction is not None:
                    # Guard against a prediction vector of the wrong length.
                    if len(prediction) == len(label_columns_csv):
                        df_target.loc[idx, label_columns_csv] = prediction
                        update_count += 1
                    else:
                         print(f"警告：ID {df_target.loc[idx, 'ID']} 的预测向量长度 ({len(prediction)}) 与 CSV 列数 ({len(label_columns_csv)}) 不匹配。跳过更新。")
                else:
                    # No matching prediction: leave the row untouched.
                    rows_not_found_count += 1

            if rows_not_found_count > 0:
                 print(f"信息：现有 CSV 中有 {rows_not_found_count} 行未能根据 Left-Fundus 在预测结果中找到匹配项（这些行未更新）。")

            # 4. Overwrite the original CSV with the updated DataFrame.
            df_target.to_csv(prediction_csv_path, index=False, encoding='utf-8')
            print(f"成功更新了 {update_count} 行的预测结果到 {prediction_csv_path}")

        except FileNotFoundError:
             print(f"错误：再次检查，找不到要更新的现有 CSV 文件：{prediction_csv_path}")
        except KeyError as e:
             print(f"错误：现有 CSV 文件格式错误或缺少必需的列: {e}")
        except Exception as e:
             print(f"更新 CSV 文件时发生意外错误: {e}")


    # --- Cleanup ---
    output_file1.close()
    output_file2.close()
    print(f"\n所有处理完成。指标JSON和错误TXT保存在: {output_dir}")
    print(f"预测结果已尝试更新到: {prediction_csv_path}")