import math,os
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.modules.instancenorm")
from model import ModelProgression
from torch import nn
import torch
import torchvision.models
from torchvision.models import ResNet101_Weights
import numpy as np
from functools import cached_property
from trainer import Trainer
from torch.utils.data import Dataset
import pandas as pd
import cv2
import albumentations as aug
import albumentations.pytorch as aug_torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from losses import IB_FocalLoss
import time

from circle_loss import convert_label_to_similarity, CircleLoss


class DiseaseClassifier(nn.Module):
    """Standalone disease-classification head.

    Maps a feature vector to 7 disease logits, scaled element-wise by a
    learned attention distribution (softmax over the 7 classes).
    """

    def __init__(self, in_dim=512):
        super().__init__()
        # Attention branch: per-disease weights that sum to 1 per sample.
        self.attention = nn.Sequential(
            nn.Linear(in_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 7),
            nn.Softmax(dim=1),
        )
        # Classification branch producing the raw 7-way logits.
        self.fc = nn.Sequential(
            nn.Linear(in_dim, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.6),
            nn.Linear(512, 7),
        )

    def forward(self, x):
        """Return attention-weighted disease logits of shape (N, 7)."""
        weights = self.attention(x)
        logits = self.fc(x)
        return logits * weights


class DeepSurModel(nn.Module):
    """CNN backbone with two decoupled heads: binary (healthy) and 7-way disease.

    ``forward`` returns ``(binary_logits, disease_logits, binary_feat,
    disease_feat)`` where the logits are (N, 1) and (N, 7) respectively and
    both features are 256-d.
    """

    def __init__(self, num_classes=1, backbone='resnet101'):
        """
        Args:
            num_classes: unused; kept for backward compatibility with callers.
            backbone: backbone name forwarded to the project ModelProgression.
        """
        super().__init__()
        # Project CNN wrapper producing a 512-d image feature.
        self.cnn = ModelProgression(backbone=backbone, output_size=512)

        # Binary (healthy/abnormal) branch — marked "shelved for now" by the author.
        # NOTE(review): InstanceNorm1d over a 2-D (N, C) tensor is exactly the
        # case the warning filter at the top of the file suppresses; recent
        # PyTorch treats 2-D input as an unbatched (C, L) tensor, so the
        # normalization axis may not be the one intended — confirm.
        self.binary_feature = nn.Sequential(
            nn.Linear(512, 256),
            nn.InstanceNorm1d(256),  # instance norm to decouple from the disease branch
            nn.ReLU(),
            nn.Dropout(0.3),
        )
        self.binary_head = nn.Linear(256, 1)

        # Multi-label disease branch (independent path).
        self.disease_feature = nn.Sequential(
            nn.Linear(512, 256),
            nn.LayerNorm(256),  # layer norm to decouple from the binary branch
            nn.ReLU(),
            nn.Dropout(0.3),
        )
        self.disease_head = nn.Linear(256, 7)

        # Currently unused in forward(); kept so existing checkpoints still load.
        self.fusion = nn.Sequential(
            nn.Linear(512, 512),
            nn.ReLU()
        )
        self.addition_norm = nn.BatchNorm1d(512)

    @staticmethod
    def _init_linear_layers(module):
        """Xavier-initialize every nn.Linear inside `module`; zero its biases."""
        for m in module.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _initialize_weights(self):
        """Xavier-init the Linear layers of all branches.

        Replaces three byte-identical init loops with one shared helper; the
        helper also covers nested Linear layers should a head ever become a
        composite module.
        """
        for module in (self.binary_feature, self.binary_head,
                       self.disease_feature, self.disease_head, self.fusion):
            self._init_linear_layers(module)

    def forward(self, x):
        """Run the backbone, then both decoupled heads.

        Returns:
            Tuple of (binary_logits (N, 1), disease_logits (N, 7),
            binary_feat (N, 256), disease_feat (N, 256)).
        """
        base_feat = self.cnn(x)

        # Decoupled features: each head gets its own projection of base_feat.
        binary_feat = self.binary_feature(base_feat)
        binary_logits = self.binary_head(binary_feat)

        disease_feat = self.disease_feature(base_feat)
        disease_logits = self.disease_head(disease_feat)

        return binary_logits, disease_logits, binary_feat, disease_feat

class ProgressionData(Dataset):
    """OIA-ODIR single-fundus dataset.

    Each row of the annotation CSV holds one fundus image filename plus
    one-hot disease labels (columns N, D, G, C, A, H, M, O). A sample is
    "healthy" iff N == 1 and all seven abnormal columns are 0.
    """

    def __init__(self, datasheet, transform, mode, preprocess=False):
        """
        Args:
            datasheet: directory containing the per-split CSV annotation files.
            transform: albumentations pipeline applied to the BGR image.
            mode: one of 'train', 'test', 'val'.
            preprocess: when True, read brightness-preprocessed images
                (train/test only; the validation path is the same either way).

        Raises:
            ValueError: if `mode` is not one of the supported splits.
        """
        super().__init__()

        self.transform = transform
        self.disease_columns = ['N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
        self.abnormal_columns = ['D', 'G', 'C', 'A', 'H', 'M', 'O']

        if mode == 'train':
            self.path_prefix = ("/home/zhangyichi/dataset/OIA-ODIR/Training Set/preprocessed/"
                                if preprocess else
                                "/home/zhangyichi/dataset/OIA-ODIR/Training Set/Images/")
            self.df = pd.read_csv(datasheet + 'train single.csv')
        elif mode == 'test':
            self.path_prefix = ("/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/preprocessed/"
                                if preprocess else
                                "/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Images/")
            self.df = pd.read_csv(datasheet + 'test single.csv')
        elif mode == 'val':
            # Both branches of the original conditional were identical, so
            # `preprocess` has no effect for the validation split.
            self.path_prefix = "/home/zhangyichi/dataset/OIA-ODIR/validation/image/"
            self.df = pd.read_csv('/home/zhangyichi/dataset/OIA-ODIR/answer.csv')
        else:
            raise ValueError(f"Unknown dataset mode: {mode!r} (expected 'train', 'test' or 'val')")

        print(self.path_prefix)
        # Healthy iff N == 1 and every abnormal column is 0.
        self.df['is_healthy'] = ((self.df['N'] == 1) &
                                 (self.df[self.abnormal_columns] == 0).all(axis=1)).astype(int)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Load one image and its labels.

        Returns:
            dict with 'image' (transformed tensor), 'labels' (8 floats),
            'binary_labels' (is_healthy float), 'disease_labels' (7 floats).
        """
        row = self.df.iloc[idx]
        img_file = self.path_prefix + row['Fundus']
        # Fall back to the secondary collected-dataset location when missing.
        if not os.path.exists(img_file):
            img_file = '/data/zhangyichi/collected_dataset/' + row['Fundus']

        image = cv2.imread(img_file, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread silently returns None on failure; fail loudly with
            # the offending path instead of crashing inside the transform.
            raise FileNotFoundError(f"Could not read image: {img_file}")

        image = self.transform(image=image)['image']

        labels = row[self.disease_columns].values.astype(float)
        is_healthy = row['is_healthy'].astype(float)
        disease_labels = row[self.abnormal_columns].values.astype(float)

        return dict(
            image=image,
            labels=labels,
            binary_labels=is_healthy,
            disease_labels=disease_labels
        )

    def get_cls_num_list(self):
        """Return per-class positive sample counts, ordered as disease_columns."""
        labels = self.df.loc[:, self.disease_columns].values.astype(float)
        cls_num_list = [np.sum(labels[:, i]) for i in range(len(self.disease_columns))]
        print('cls_num_list: ', cls_num_list)
        return cls_num_list

class TrainerDR(Trainer):
    """Trainer for the decoupled healthy/disease fundus classifier on OIA-ODIR."""

    @cached_property
    def model(self):
        """Build DeepSurModel and initialize its backbone.

        If `cfg.load_pretrain` is set, that state dict is loaded into the
        backbone. Otherwise an ImageNet ResNet-101 (final fc removed)
        replaces the backbone, and `cfg.load_checkpoint` (when set) restores
        a full training checkpoint plus the epoch counters.
        """
        # NOTE(review): cfg.model is passed positionally as `num_classes`
        # (which DeepSurModel.__init__ ignores); if it names a backbone,
        # `backbone=self.cfg.model` was probably intended — confirm.
        model = DeepSurModel(self.cfg.model).to(self.device)
        if self.cfg.load_pretrain is not None:
            print('loading ', self.cfg.load_pretrain)
            # load_state_dict returns a missing/unexpected-keys report.
            print(model.cnn.backbone.load_state_dict(
                torch.load(self.cfg.load_pretrain, map_location=self.device)
            ))
        else:
            print("Not load pretrained model")
            weights = ResNet101_Weights.DEFAULT
            base_backbone_full = torchvision.models.resnet101(weights=weights)
            modules = list(base_backbone_full.children())[:-1]  # drop the final fc layer
            model.cnn.backbone = torch.nn.Sequential(*modules)
            if self.cfg.load_checkpoint is not None:
                print('Load checkpoint')
                checkpoint = torch.load(self.cfg.load_checkpoint, map_location=self.device, weights_only=False)
                model.load_state_dict(checkpoint['model_state_dict'])
                self.cfg.start_epoch = checkpoint['epoch'] + 1
                # NOTE(review): hard-codes the epoch budget on resume.
                self.cfg.epochs = 150
        return model

    @cached_property
    def beta(self):
        # Static fallback weight; batch() computes a dynamic beta instead.
        return 1

    @cached_property
    def weights(self):
        """Inverse-frequency (IB-reweight style) weights for the 7 abnormal classes.

        Drops the leading 'N' (normal) count, then normalizes the inverse
        counts so they sum to the number of classes.
        """
        cls_num_list = self.train_dataset.get_cls_num_list()
        cls_num_list.pop(0)  # discard the 'N' (normal) count
        per_cls_weights = 1.0 / np.array(cls_num_list)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
        return torch.FloatTensor(per_cls_weights).to(self.device)

    @cached_property
    def train_dataset(self):
        """Training split with geometric/photometric augmentation."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.Flip(p=0.5),
            aug.ImageCompression(quality_lower=10, quality_upper=80, p=0.2),
            aug.MedianBlur(p=0.3),
            aug.RandomBrightnessContrast(p=0.5),
            aug.RandomGamma(p=0.2),
            aug.GaussNoise(p=0.2),
            aug.Rotate(border_mode=cv2.BORDER_CONSTANT,
                       value=0, p=0.7, limit=45),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Training Set/Annotation/', transform, mode='train', preprocess=self.cfg.bright_adjust)

    @cached_property
    def val_dataset(self):
        """Validation split (resize + crop only, no augmentation)."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Annotation/', transform, mode='val', preprocess=self.cfg.bright_adjust)

    @cached_property
    def test_dataset(self):
        """On-site test split (resize + crop only, no augmentation)."""
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', transform, mode='test', preprocess=self.cfg.bright_adjust)

    @cached_property
    def optimizer(self):
        """Adam over all model parameters with fixed 1e-4 weight decay."""
        optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.cfg.lr, weight_decay=1e-4)
        return optimizer

    def batch(self, epoch, i_batch, data) -> dict:
        """Run one step: forward pass, binary + masked multi-label loss.

        The disease (multi-label) loss is computed only over abnormal
        samples (binary label 0) and is combined with the binary loss via a
        dynamically clamped weight.

        Note: earlier revisions also built CircleLoss / IB_FocalLoss here
        every batch without ever using them; that dead work was removed.
        """
        imgs = data['image'].to(self.device)

        labels = data['labels'].to(self.device).float().squeeze(1)
        binary_labels = data['binary_labels'].to(self.device).float().unsqueeze(1)
        disease_labels = data['disease_labels'].to(self.device).float()

        binary_logits, multi_logits, binary_feat, multi_feat = self.model(imgs)

        binary_criterion = nn.BCEWithLogitsLoss()
        multi_criterion = nn.BCEWithLogitsLoss()

        binary_loss = binary_criterion(binary_logits, binary_labels)

        # Only abnormal samples (binary label 0) contribute to the disease loss.
        abnormal_mask = (binary_labels == 0).squeeze()
        if abnormal_mask.sum() > 0:
            multi_loss = multi_criterion(
                multi_logits[abnormal_mask],
                disease_labels[abnormal_mask]
            )
        else:
            multi_loss = torch.tensor(0.0).to(self.device)

        # Dynamic weight: clamp the binary/multi loss ratio into [0.5, 4.0].
        with torch.no_grad():
            loss_ratio = (binary_loss.detach() + 1e-8) / (multi_loss.detach() + 1e-8)
            beta = torch.clamp(loss_ratio, 0.5, 4.0)

        total_loss = binary_loss + beta * multi_loss

        return dict(
            loss=total_loss,
            binary_pred=torch.sigmoid(binary_logits),
            multi_pred=torch.sigmoid(multi_logits),
            dynamic_beta=beta,
            binary_labels=binary_labels,
            multi_labels=disease_labels,
            labels=labels,
        )

    def hierarchical_predict(self, binary_pred, disease_pred, threshold=0.5):
        """Merge the two heads into an 8-way multi-hot prediction.

        Samples with binary_pred >= threshold are marked normal (class 0);
        the rest receive thresholded disease predictions in classes 1-7.
        """
        binary_pred = torch.tensor(binary_pred, dtype=torch.float32)
        disease_pred = torch.tensor(disease_pred, dtype=torch.float32)

        final_pred = torch.zeros(binary_pred.shape[0], 8)  # 8-way output

        # Normal samples get class 0.
        normal_mask = (binary_pred >= threshold).squeeze()
        final_pred[normal_mask, 0] = 1

        # Abnormal samples get their thresholded disease predictions.
        abnormal_mask = ~normal_mask
        if abnormal_mask.sum() > 0:
            disease_labels = (disease_pred[abnormal_mask] >= threshold).float()
            final_pred[abnormal_mask, 1:] = disease_labels

        return final_pred

    def metrics(self, epoch, data) -> dict:
        """Aggregate accuracy / precision / recall / AUC over collected outputs."""
        # Silence UndefinedMetricWarning (a UserWarning subclass) from sklearn.
        warnings.filterwarnings("ignore", category=UserWarning, module="sklearn.metrics._classification")

        binary_pred = data['binary_pred'].cpu().numpy()
        multi_pred = data['multi_pred'].cpu().numpy()
        labels = data['labels'].cpu().numpy()
        binary_labels = data['binary_labels'].cpu().numpy()
        multi_labels = data['multi_labels'].cpu().numpy()

        # Combine both heads into the final 8-way prediction.
        final_pred = self.hierarchical_predict(binary_pred, multi_pred).numpy().astype(int)

        acc = accuracy_score(labels, final_pred)
        binary_acc = accuracy_score(binary_labels, (binary_pred >= 0.5).astype(int))
        multi_acc = accuracy_score(multi_labels, (multi_pred >= 0.5).astype(int))

        # Macro-averaged precision / recall over the 8 classes.
        precision = precision_score(labels, final_pred, average='macro')
        recall = recall_score(labels, final_pred, average='macro')

        # NOTE(review): AUC is computed from hard 0/1 predictions rather than
        # probabilities, which underestimates the true ROC AUC — confirm
        # whether probability scores were intended here.
        auc = roc_auc_score(labels, final_pred, average='macro')

        return dict(
            beta=float(data['dynamic_beta'].mean()),
            loss=float(data['loss'].mean()),
            accuracy=acc,
            binary_acc=binary_acc,
            multi_acc=multi_acc,
            precision=precision,
            recall=recall,
            auc=auc
        )



if __name__ == '__main__':
    trainer = TrainerDR()

    def _stamp():
        """Current local time formatted for run logs."""
        return time.strftime("%Y_%m%d_%H%M", time.localtime())

    print(_stamp())
    trainer.train()
    print(_stamp())
