import math
import warnings
from model import ModelProgression
from torch import nn
import torch
import numpy as np
from functools import cached_property
from trainer import Trainer
from config import TrainerConfig
from losses import IB_FocalLoss, IBLoss, FocalLoss, MultiFocalLoss
from torch.utils.data import Dataset
import pandas as pd
import cv2
import albumentations as aug
import albumentations.pytorch as aug_torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score

# Suppress UserWarnings from torch's InstanceNorm module (triggered by the
# InstanceNorm1d layer used in DeepSurModel below).
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.modules.instancenorm")

class DeepSurModel(nn.Module):
    """Two-headed fundus classifier: a healthy/abnormal binary head plus a
    7-way multi-label disease head.

    Both heads consume features from a shared CNN backbone but are decoupled
    through independent 256-d projection paths that use different
    normalizations (InstanceNorm vs. LayerNorm).
    """

    def __init__(self, num_classes=1, backbone='resnet50'):
        """
        Args:
            num_classes: kept for interface compatibility; not used by the
                current architecture (head sizes are fixed at 1 and 7).
            backbone: backbone name forwarded to ModelProgression.
        """
        super().__init__()
        # Shared feature extractor (ModelProgression wraps the backbone).
        self.cnn = ModelProgression(backbone=backbone, output_size=512)

        # Binary (healthy vs. abnormal) path; instance normalization is used
        # to decouple it from the disease path.
        self.binary_feature = nn.Sequential(
            nn.Linear(512, 256),
            nn.InstanceNorm1d(256),
            nn.ReLU()
        )
        self.binary_head = nn.Linear(256, 1)

        # Multi-label disease path (independent projection); layer
        # normalization is used here for decoupling.
        self.disease_feature = nn.Sequential(
            nn.Linear(512, 256),
            nn.LayerNorm(256),
            nn.ReLU()
        )
        self.disease_head = nn.Linear(256, 7)

    def _initialize_weights(self):
        """Xavier-initialize every Linear layer of both classification paths.

        Bug fix: the original iterated ``self.binary_classifier`` and
        ``self.multi_classifier`` — attributes that do not exist on this
        module — so any call raised AttributeError. Iterate the real
        submodules instead. (Note: __init__ does not invoke this method;
        call it explicitly if Xavier initialization is desired.)
        """
        for submodule in (self.binary_feature, self.binary_head,
                          self.disease_feature, self.disease_head):
            for m in submodule.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run both heads on a batch of images.

        Returns:
            Tuple of (binary_logits, disease_logits, binary_feat, disease_feat).
        """
        base_feat = self.cnn(x)

        # Decoupled per-task features.
        binary_feat = self.binary_feature(base_feat)
        disease_feat = self.disease_feature(base_feat)

        binary_logits = self.binary_head(binary_feat)
        disease_logits = self.disease_head(disease_feat)

        return binary_logits, disease_logits, binary_feat, disease_feat

class ProgressionData(Dataset):
    """Single-fundus-image dataset over the OIA-ODIR splits.

    Each item is a dict holding the transformed image, the full 8-way label
    vector, a healthy/abnormal binary label, and the 7 disease labels.
    """

    def __init__(self, datasheet, transform, mode):
        """
        Args:
            datasheet: directory containing the per-split CSV annotation files.
            transform: albumentations pipeline applied to each image.
            mode: one of 'train', 'test', 'val'; selects the CSV file and the
                image root directory.

        Raises:
            ValueError: if ``mode`` is not a supported split name.
        """
        super().__init__()

        self.transform = transform
        self.label_columns = ['N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
        self.abnormal_columns = ['D', 'G', 'C', 'A', 'H', 'M', 'O']

        if mode == 'train':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/Training Set/Images/"
            self.df = pd.read_csv(datasheet + 'train single.csv')
        elif mode == 'test':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Images/"
            self.df = pd.read_csv(datasheet + 'test single.csv')
        elif mode == 'val':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Images/"
            self.df = pd.read_csv(datasheet + 'val single.csv')
        else:
            # Fail loudly with the offending mode instead of printing and
            # raising a bare Exception (ValueError is still an Exception,
            # so existing broad handlers keep working).
            raise ValueError(f"Fail to load dataset: unsupported mode {mode!r}")

        # A sample is healthy iff N == 1 and every disease column is 0.
        self.df['is_healthy'] = ((self.df['N'] == 1) & (self.df[self.abnormal_columns] == 0).all(axis=1)).astype(int)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Load, transform and return one sample as a dict."""
        img_file = self.path_prefix + self.df.iloc[idx]['Fundus']

        image = cv2.imread(img_file, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread silently returns None on a missing/corrupt file;
            # surface that instead of crashing inside the transform.
            raise FileNotFoundError(f"Failed to read image: {img_file}")

        # Apply the augmentation/normalization pipeline.
        image = self.transform(image=image)['image']

        # Per-sample labels: full 8-way vector, binary healthy flag, and the
        # 7 disease columns.
        labels = self.df.iloc[idx][self.label_columns].values.astype(float)
        is_healthy = self.df.iloc[idx]['is_healthy'].astype(float)
        disease_labels = self.df.iloc[idx][self.abnormal_columns].values.astype(float)

        return dict(
            image=image,
            labels=labels,
            binary_labels=is_healthy,
            disease_labels=disease_labels
        )

    def get_cls_num_list(self):
        """Return the number of positive samples per class, one entry per
        label column, in column order."""
        labels = self.df.loc[:, self.label_columns].values.astype(float)
        return [np.sum(labels[:, i]) for i in range(len(self.label_columns))]


class TrainerDR(Trainer):
    """Trainer wiring the hierarchical DeepSurModel to the OIA-ODIR datasets.

    Uses a binary healthy/abnormal head plus a 7-way disease head, with a
    dynamically weighted sum of the two losses.
    """

    def __init__(self):
        super().__init__()
        # Bug fix: install this trainer's config *before* building the
        # transform. The original assigned self.cfg after the transform,
        # so image_size was read from whatever cfg the base class set.
        self.cfg = TrainerConfig()
        self.test_transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])

    @cached_property
    def model(self):
        """Build the model; optionally resume from a checkpoint or load a
        pretrained backbone (ModelProgression adds attention on top of the
        pretrained ResNet features)."""
        model = DeepSurModel().to(self.device)
        # Resume full training state when a checkpoint path is configured.
        if self.cfg.load_checkpoint is not None:
            print('Load checkpoint')
            checkpoint = torch.load(self.cfg.load_checkpoint, map_location=self.device, weights_only=False)
            model.load_state_dict(checkpoint['model_state_dict'])
            self.cfg.start_epoch = checkpoint['epoch'] + 1
            self.cfg.epochs = 100
        else:
            print("Not load checkpoint")
            if self.cfg.load_pretrain is not None:
                print('loading pretrained model', self.cfg.load_pretrain)
                print(model.cnn.backbone.load_state_dict(
                    torch.load(self.cfg.load_pretrain, map_location=self.device, weights_only=True)
                ))
            else:
                print("Not load pretrained model")
        return model

    @cached_property
    def beta(self):
        # NOTE(review): batch() computes its own dynamic beta; this static
        # value appears unused within this file — confirm before removing.
        return 1

    @cached_property
    def weights(self):
        """Inverse-frequency (IB-reweight style) class weights for the 7
        disease classes, normalized to sum to the number of classes."""
        cls_num_list = self.train_dataset.get_cls_num_list()
        cls_num_list.pop(0)  # drop the healthy 'N' count; weights cover diseases only
        per_cls_weights = 1.0 / np.array(cls_num_list)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
        return torch.FloatTensor(per_cls_weights).to(self.device)

    @cached_property
    def train_dataset(self):
        """Training split with heavy augmentation."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.Flip(p=0.5),
            aug.ImageCompression(quality_lower=10, quality_upper=80, p=0.2),
            aug.MedianBlur(p=0.3),
            aug.RandomBrightnessContrast(p=0.5),
            aug.RandomGamma(p=0.2),
            aug.GaussNoise(p=0.2),
            aug.Rotate(border_mode=cv2.BORDER_CONSTANT,
                       value=0, p=0.7, limit=45),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Training Set/Annotation/', transform, mode='train')

    @cached_property
    def val_dataset(self):
        """Validation split (the off-site test set)."""
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Annotation/', self.test_transform, mode='val')

    @cached_property
    def test_dataset(self):
        """Test split (the on-site test set)."""
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', self.test_transform, mode='test')

    @cached_property
    def formal_test_dataset(self):
        """Intended for two-eye prediction.

        NOTE(review): ProgressionData rejects mode='formal test' (only
        'train'/'test'/'val' are handled), so accessing this property
        raises — confirm the intended mode before use.
        """
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', self.test_transform, mode='formal test')

    @cached_property
    def optimizer(self):
        """Adam optimizer; restores optimizer state when resuming from a
        checkpoint."""
        optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.cfg.lr, weight_decay=1e-5)
        if self.cfg.load_checkpoint is not None:
            checkpoint = torch.load(self.cfg.load_checkpoint, map_location=self.device, weights_only=False)
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        return optimizer

    def batch(self, epoch, i_batch, data) -> dict:
        """Process one batch: forward pass, hierarchical losses, dynamic
        loss weighting.

        Returns:
            dict with total loss, sigmoid predictions of both heads, the
            dynamic loss weight, and the ground-truth labels.
        """
        imgs = data['image'].to(self.device)

        labels = data['labels'].to(self.device).float().squeeze(1)
        binary_labels = data['binary_labels'].to(self.device).float().unsqueeze(1)
        disease_labels = data['disease_labels'].to(self.device).float()

        binary_logits, multi_logits, binary_feat, multi_feat = self.model(imgs)

        # Healthy-vs-abnormal loss on the binary head.
        bce_criterion = nn.BCEWithLogitsLoss()
        binary_loss = bce_criterion(binary_logits, binary_labels)

        focal_criterion = MultiFocalLoss(alpha=self.weights, gamma=2)

        # Disease loss is computed on abnormal samples only (binary label 0).
        abnormal_mask = (binary_labels == 0).squeeze()
        if abnormal_mask.sum() > 0:
            # Warm up with plain BCE for the first half of training, then
            # switch to the class-weighted focal loss.
            if epoch < self.cfg.epochs / 2:
                multi_loss = bce_criterion(
                    multi_logits[abnormal_mask],
                    disease_labels[abnormal_mask]
                )
            else:
                multi_loss = focal_criterion(
                    multi_logits[abnormal_mask],
                    disease_labels[abnormal_mask],
                )
        else:
            multi_loss = torch.tensor(0.0).to(self.device)

        # Dynamic weight balancing the two losses, clamped for stability.
        with torch.no_grad():
            loss_ratio = binary_loss.detach() / (multi_loss.detach() + 1e-8)
            beta = torch.clamp(loss_ratio, 0.5, 2.0)

        total_loss = binary_loss + beta * multi_loss

        return dict(
            loss=total_loss,
            binary_pred=torch.sigmoid(binary_logits),
            multi_pred=torch.sigmoid(multi_logits),
            dynamic_beta=beta,
            labels=labels,
        )

    def hierarchical_predict(self, binary_pred, disease_pred, threshold=0.5):
        """Merge the two heads into one 8-way multi-hot prediction.

        Samples predicted healthy (binary score >= threshold) get class 0;
        the remaining samples get thresholded disease predictions in
        classes 1-7.
        """
        binary_pred = torch.tensor(binary_pred, dtype=torch.float32)
        disease_pred = torch.tensor(disease_pred, dtype=torch.float32)

        final_pred = torch.zeros(binary_pred.shape[0], 8)

        # Healthy samples -> class 0.
        normal_mask = (binary_pred >= threshold).squeeze()
        final_pred[normal_mask, 0] = 1

        # Abnormal samples -> thresholded disease predictions.
        abnormal_mask = ~normal_mask
        if abnormal_mask.sum() > 0:
            disease_labels = (disease_pred[abnormal_mask] >= threshold).float()
            final_pred[abnormal_mask, 1:] = disease_labels

        return final_pred

    def metrics(self, epoch, data) -> dict:
        """Compute epoch-level classification metrics from pooled
        predictions and labels."""
        # Silence sklearn's UndefinedMetricWarning (a UserWarning subclass)
        # for classes absent from a split.
        warnings.filterwarnings("ignore", category=UserWarning, module="sklearn.metrics._classification")
        binary_pred = data['binary_pred'].cpu().numpy()
        multi_pred = data['multi_pred'].cpu().numpy()
        labels = data['labels'].cpu().numpy()

        # Combine the two heads into the final 8-way multi-hot prediction.
        final_pred = self.hierarchical_predict(binary_pred, multi_pred).numpy().astype(int)

        acc = accuracy_score(labels, final_pred)
        precision = precision_score(labels, final_pred, average='macro')
        recall = recall_score(labels, final_pred, average='macro')
        # NOTE(review): AUC is computed on hard 0/1 predictions rather than
        # probabilities, which understates the true ROC-AUC — consider
        # passing the sigmoid scores instead.
        auc = roc_auc_score(labels, final_pred, average='macro')

        return dict(
            beta=float(data['dynamic_beta'].mean()),
            loss=float(data['loss'].mean()),
            accuracy=acc,
            precision=precision,
            recall=recall,
            auc=auc
        )



if __name__ == '__main__':
    # Entry point: build the trainer and run the full training loop.
    TrainerDR().train()
