import math
import warnings
from model import ModelProgression
from torch import nn
import torch
import numpy as np
from functools import cached_property
from trainer import Trainer
from torch.utils.data import Dataset
import pandas as pd
import cv2
import albumentations as aug
import albumentations.pytorch as aug_torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score

class DeepSurModel(nn.Module):
    """Two-headed fundus-image classifier.

    A shared CNN backbone (``ModelProgression``) maps each image to a
    512-d feature vector, which feeds two MLP heads:

    * a binary head (``num_classes`` logits) that flags abnormal eyes, and
    * a multi-label head (7 logits), one score per disease.

    Both heads emit raw logits: the downstream loss is
    ``BCEWithLogitsLoss``, which applies the sigmoid itself, so no final
    activation layer is used here.
    """

    def __init__(self, num_classes=1, backbone='resnet50'):
        super().__init__()
        self.cnn = ModelProgression(backbone=backbone, output_size=512)
        self.binary_classifier = self._make_head(num_classes)
        self.multi_classifier = self._make_head(7)  # seven disease classes
        self._initialize_weights()

    @staticmethod
    def _make_head(out_features):
        """Build a 512 -> 256 -> 64 -> ``out_features`` MLP head.

        Sigmoid activations between the hidden layers mirror the original
        design; there is deliberately no terminal activation because the
        loss operates on logits.
        """
        return nn.Sequential(
            nn.Linear(512, 256),
            nn.Sigmoid(),
            nn.BatchNorm1d(256),
            nn.Linear(256, 64),
            nn.Sigmoid(),
            nn.BatchNorm1d(64),
            nn.Linear(64, out_features),
        )

    def _initialize_weights(self):
        """Xavier-initialize every Linear layer in both heads; zero the biases."""
        for head in (self.binary_classifier, self.multi_classifier):
            for m in head.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return ``(binary_logits, multiclass_logits)`` for a batch of images."""
        features = self.cnn(x)
        binary_logits = self.binary_classifier(features)
        multiclass_logits = self.multi_classifier(features)
        return binary_logits, multiclass_logits

class ProgressionData(Dataset):
    """Fundus-image dataset for the OIA-ODIR disease-classification task.

    Selects the per-split annotation CSV and image directory by ``mode``,
    derives an ``is_healthy`` flag (``N == 1`` and all seven disease
    columns zero), and yields transformed images plus binary and
    multi-label targets per sample.
    """

    def __init__(self, datasheet, transform, mode):
        """
        Args:
            datasheet: directory containing the split's annotation CSVs.
            transform: albumentations pipeline applied to each image.
            mode: one of ``'train'``, ``'test'``, ``'val'``.

        Raises:
            ValueError: if ``mode`` is not one of the three known splits.
        """
        super().__init__()
        self.transform = transform
        # 'N' is the normal flag; the remaining letters are the seven diseases.
        self.disease_columns = ['N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
        self.abnormal_columns = ['D', 'G', 'C', 'A', 'H', 'M', 'O']

        if mode == 'train':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/Training Set/Images/"
            self.df = pd.read_csv(datasheet + 'train single.csv')
        elif mode == 'test':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Images/"
            self.df = pd.read_csv(datasheet + 'test single.csv')
        elif mode == 'val':
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Images/"
            self.df = pd.read_csv(datasheet + 'val single.csv')
        else:
            # Fail loudly with the offending value instead of print + bare Exception.
            raise ValueError(f"Fail to load dataset: unknown mode {mode!r}")

        # Healthy iff the normal flag is set and every disease column is zero.
        self.df['is_healthy'] = (
            (self.df['N'] == 1) & (self.df[self.abnormal_columns] == 0).all(axis=1)
        ).astype(int)

    def __len__(self):
        """Number of samples in this split."""
        return len(self.df)

    def __getitem__(self, idx):
        """Load one sample: transformed image plus its label vectors."""
        row = self.df.iloc[idx]
        img_file = self.path_prefix + row['Fundus']

        image = cv2.imread(img_file, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread returns None (no exception) on a missing/corrupt file;
            # raise here instead of letting the transform crash obscurely.
            raise FileNotFoundError(f"Could not read image: {img_file}")

        image = self.transform(image=image)['image']

        labels = row[self.disease_columns].values.astype(float)
        is_healthy = row['is_healthy'].astype(float)
        disease_labels = row[self.abnormal_columns].values.astype(float)

        return dict(
            image=image,
            labels=labels,
            binary_labels=is_healthy,
            disease_labels=disease_labels,
        )

class TrainerDR(Trainer):
    """Trainer for the hierarchical (binary + multi-label) DR classifier.

    The backbone (ResNet wrapped by ``ModelProgression`` with attention)
    extracts image features; a binary head flags abnormal eyes and a 7-way
    multi-label head scores the individual diseases.
    """

    @cached_property
    def model(self):
        """Build DeepSurModel, optionally loading pretrained backbone weights."""
        model = DeepSurModel().to(self.device)
        if self.cfg.load_pretrain is not None:
            print('loading ', self.cfg.load_pretrain)
            # Print the load_state_dict result so missing/unexpected keys show up.
            print(model.cnn.backbone.load_state_dict(
                torch.load(self.cfg.load_pretrain, map_location=self.device)
            ))
        else:
            print("Not load pretrained model")
        return model

    @cached_property
    def beta(self):
        # NOTE(review): currently unused — batch() hardcodes beta = 0.78.
        return 1

    @cached_property
    def train_dataset(self):
        """Training split with augmentation (flips, blur, noise, rotation)."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.Flip(p=0.5),
            aug.ImageCompression(quality_lower=10, quality_upper=80, p=0.2),
            aug.MedianBlur(p=0.3),
            aug.RandomBrightnessContrast(p=0.5),
            aug.RandomGamma(p=0.2),
            aug.GaussNoise(p=0.2),
            aug.Rotate(border_mode=cv2.BORDER_CONSTANT,
                       value=0, p=0.7, limit=45),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Training Set/Annotation/', transform, mode='train')

    @cached_property
    def val_dataset(self):
        """Validation split (Off-site Test Set) with deterministic resize/crop only."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Annotation/', transform, mode='val')

    @cached_property
    def test_dataset(self):
        """Test split (On-site Test Set) with deterministic resize/crop only."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', transform, mode='test')

    @cached_property
    def optimizer(self):
        """Adam over all model parameters with light weight decay."""
        return torch.optim.Adam(
            self.model.parameters(), lr=self.cfg.lr, weight_decay=1e-5)

    def batch(self, epoch, i_batch, data) -> dict:
        """Run one step: forward pass, hierarchical loss, sigmoid predictions.

        Returns a dict with the combined loss, probabilities from both
        heads, and the ground-truth multi-label targets.
        """
        # Weight of the disease loss relative to the binary loss.
        # NOTE(review): shadows the (unused) ``beta`` cached_property above.
        beta = 0.78

        imgs = data['image'].to(self.device)

        labels = data['labels'].to(self.device).float().squeeze(1)
        binary_labels = data['binary_labels'].to(self.device).float().unsqueeze(1)
        disease_labels = data['disease_labels'].to(self.device).float()

        binary_logits, multi_logits = self.model(imgs)

        # Binary (healthy vs abnormal) loss on raw logits.
        criterion = nn.BCEWithLogitsLoss()
        binary_loss = criterion(binary_logits, binary_labels)

        # Disease loss is computed only on abnormal samples (binary label 0).
        # squeeze(1) — not squeeze() — keeps the mask 1-D even for batch size 1.
        abnormal_mask = (binary_labels == 0).squeeze(1)
        if abnormal_mask.sum() > 0:
            multi_loss = criterion(
                multi_logits[abnormal_mask],
                disease_labels[abnormal_mask],
            )
        else:
            multi_loss = torch.tensor(0.0, device=self.device)

        total_loss = binary_loss + beta * multi_loss

        return dict(
            loss=total_loss,
            binary_pred=torch.sigmoid(binary_logits),
            multi_pred=torch.sigmoid(multi_logits),
            labels=labels,
        )

    def hierarchical_predict(self, binary_pred, disease_pred, threshold=0.5):
        """Merge the two heads into an 8-column one/multi-hot prediction.

        Column 0 marks samples predicted healthy (binary score >= threshold);
        columns 1-7 carry the thresholded disease predictions for the rest.

        Args:
            binary_pred: (B, 1) healthy-probability array/tensor.
            disease_pred: (B, 7) per-disease probability array/tensor.
            threshold: decision threshold for both heads.

        Returns:
            (B, 8) float tensor of 0/1 predictions.
        """
        binary_pred = torch.as_tensor(binary_pred, dtype=torch.float32)
        disease_pred = torch.as_tensor(disease_pred, dtype=torch.float32)

        final_pred = torch.zeros(binary_pred.shape[0], 8)

        # reshape(-1) instead of squeeze(): stays 1-D even for a batch of one.
        normal_mask = (binary_pred >= threshold).reshape(-1)
        final_pred[normal_mask, 0] = 1

        # Abnormal samples get the thresholded disease labels in columns 1-7.
        abnormal_mask = ~normal_mask
        if abnormal_mask.sum() > 0:
            disease_labels = (disease_pred[abnormal_mask] >= threshold).float()
            final_pred[abnormal_mask, 1:] = disease_labels

        return final_pred

    def metrics(self, epoch, data) -> dict:
        """Compute loss, accuracy, macro precision/recall, and macro AUC."""
        # Silence sklearn's undefined-metric messages (emitted as UserWarning)
        # for classes that are absent from a batch.
        warnings.filterwarnings("ignore", category=UserWarning, module="sklearn.metrics._classification")

        binary_pred = data['binary_pred'].cpu().numpy()
        multi_pred = data['multi_pred'].cpu().numpy()
        labels = data['labels'].cpu().numpy()

        # Combine both heads into the final 8-class 0/1 prediction matrix.
        final_pred = self.hierarchical_predict(binary_pred, multi_pred).numpy().astype(int)

        acc = accuracy_score(labels, final_pred)
        precision = precision_score(labels, final_pred, average='macro')
        recall = recall_score(labels, final_pred, average='macro')
        # AUC over the thresholded predictions (not raw scores), as before.
        auc = roc_auc_score(labels, final_pred, average='macro')

        return dict(
            loss=float(data['loss'].mean()),
            accuracy=acc,
            precision=precision,
            recall=recall,
            auc=auc,
        )



if __name__ == '__main__':
    # Script entry point: build the trainer and run the training loop.
    TrainerDR().train()
