import math
import warnings
from model import ModelProgression
from torch import nn
import torch
import numpy as np
from functools import cached_property
from trainer import Trainer
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import cv2
import albumentations as aug
import albumentations.pytorch as aug_torch
from imblearn.over_sampling import SMOTE
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from losses import IB_FocalLoss, IBLoss, FocalLoss, MultiFocalLoss


# NOTATION_LEFT_PATH =
# NOTATION_RIGHT_PATH =

class DeepSurModel(nn.Module):
    """CNN backbone (ModelProgression) followed by an MLP classification head.

    The head emits raw logits: the downstream loss (BCEWithLogitsLoss)
    applies the sigmoid itself, so no final activation is used here.
    """

    def __init__(self, num_classes=8, backbone='resnet50'):
        super().__init__()
        # 512-dim feature extractor shared with the SMOTE feature pipeline.
        self.cnn = ModelProgression(backbone=backbone, output_size=512)
        head = [
            nn.Linear(512, 256),
            nn.Sigmoid(),
            nn.BatchNorm1d(256),
            nn.Linear(256, 64),
            nn.Sigmoid(),
            nn.BatchNorm1d(64),
            nn.Linear(64, num_classes),
        ]
        self.classifier = nn.Sequential(*head)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-uniform weights and zero biases for every linear layer
        # in the classification head.
        for module in self.classifier.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return per-class logits for a batch of images."""
        features = self.cnn(x)
        return self.classifier(features)

# Extract features with the ModelProgression backbone, for use in oversampling.
def extract_features(model_cnn, dataloader, device):
    """Run ``model_cnn`` over ``dataloader`` and collect features for SMOTE.

    Args:
        model_cnn: feature-extraction module (e.g. the CNN backbone).
        dataloader: iterable yielding dicts with ``'image'`` (tensor) and
            ``'labels'`` (tensor) entries.
        device: device the model runs on; images are moved there.

    Returns:
        Tuple ``(features, labels)`` of numpy arrays stacked over all batches
        (empty 1-D arrays if the dataloader yields nothing).
    """
    print("Extracting features for SMOTE")
    all_features = []
    all_labels = []
    # Bug fix: the original switched to eval mode and never restored the
    # previous mode, silently disabling dropout/BN updates for the rest
    # of training.
    was_training = model_cnn.training
    model_cnn.eval()
    try:
        with torch.no_grad():
            for data in dataloader:
                images = data['image'].to(device)
                labels = data['labels'].numpy()

                base_feat = model_cnn(images)
                all_features.append(base_feat.cpu().numpy())
                all_labels.append(labels)
    finally:
        model_cnn.train(was_training)

    if not all_features:
        # Robustness: np.concatenate raises on an empty list.
        return np.empty((0,)), np.empty((0,))
    return np.concatenate(all_features, axis=0), np.concatenate(all_labels, axis=0)

class FeatureDataset(Dataset):
    """Wraps pre-computed feature tensors and their labels as a Dataset."""

    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        # Features are already tensors; just pair each one with its label.
        return dict(image=self.features[idx], labels=self.labels[idx])

class ProgressionData(Dataset):
    """Fundus-image dataset for the OIA-ODIR progression task.

    ``mode`` selects the image directory and annotation CSV. Single-eye
    modes return one image per item; the paired modes ('formal test',
    'formal test double') return a left/right image pair per item.
    """

    # Per-mode (image directory, annotation CSV filename).
    _MODE_SOURCES = {
        'train': ("/home/zhangyichi/dataset/OIA-ODIR/Training Set/Images/", 'train single.csv'),
        'test': ("/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Images/", 'test single.csv'),
        'formal test': ("/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Images/", 'test.csv'),
        'val': ("/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Images/", 'val single.csv'),
        'formal test single': ("/home/zhangyichi/dataset/OIA-ODIR/validation/image/", 'answer.csv'),
        'formal test double': ("/home/zhangyichi/dataset/OIA-ODIR/validation/image/", 'double_answer.csv'),
    }

    def __init__(self, datasheet, transform, mode):
        super().__init__()

        self.transform = transform
        self.label_columns = ['N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
        self.abnormal_columns = ['D', 'G', 'C', 'A', 'H', 'M', 'O']
        self.mode = mode

        if mode not in self._MODE_SOURCES:
            print("Fail to load dataset")
            raise Exception
        self.path_prefix, csv_name = self._MODE_SOURCES[mode]
        self.df = pd.read_csv(datasheet + csv_name)

    def __len__(self):
        return len(self.df)

    def _load_image(self, filename):
        # Read from disk and apply the albumentations pipeline.
        image = cv2.imread(self.path_prefix + filename, cv2.IMREAD_COLOR)
        return self.transform(image=image)['image']

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        labels = row[self.label_columns].values.astype(float)

        if self.mode in ('formal test', 'formal test double'):
            # Paired-eye sample: one image per eye plus the shared labels.
            left_filename = row['Left-Fundus']
            right_filename = row['Right-Fundus']
            return dict(
                left_image=self._load_image(left_filename),
                right_image=self._load_image(right_filename),
                labels=labels,
                left_filename=left_filename,
                right_filename=right_filename,
            )

        filename = row['Fundus']
        return dict(
            image=self._load_image(filename),
            labels=labels,
            filename=filename,
        )

    def get_cls_num_list(self):
        """Return the number of positive samples per class as a list."""
        labels = self.df.loc[:, self.label_columns].values.astype(float)
        return [np.sum(labels[:, i]) for i in range(len(self.label_columns))]
   

class TrainerDR(Trainer):
    """Training driver for the multi-label fundus classifier.

    Builds the model, transforms, datasets/loaders, optimizer and loss;
    the ``Trainer`` base class supplies the outer training loop and calls
    ``batch`` / ``metrics`` per step.
    """

    def __init__(self):
        super().__init__()
        # Deterministic evaluation transform (resize + center crop only).
        self.test_transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        self.model = DeepSurModel(backbone=self.cfg.model).to(self.device)
        # Resume from a full checkpoint when configured; otherwise
        # optionally warm-start only the CNN backbone from pretrained weights.
        if self.cfg.load_checkpoint is not None:
            print('Load checkpoint')
            checkpoint = torch.load(self.cfg.load_checkpoint, map_location=self.device, weights_only=False)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.cfg.start_epoch = checkpoint['epoch'] + 1
        else:
            print("Not load checkpoint")
            if self.cfg.load_pretrain is not None:
                print('loading pretrained model', self.cfg.load_pretrain)
                print(self.model.cnn.backbone.load_state_dict(
                    torch.load(self.cfg.load_pretrain, map_location=self.device, weights_only=True)
                ))
            else:
                print("Not load pretrained model")

    @cached_property
    def beta(self):
        # Placeholder hyper-parameter, kept for interface compatibility.
        return 1

    @cached_property
    def weights(self):
        """Per-class loss weights via inverse-frequency (IB) reweighting,
        normalized so the weights sum to the number of classes."""
        cls_num_list = self.train_dataset.get_cls_num_list()
        per_cls_weights = 1.0 / np.array(cls_num_list)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
        return torch.FloatTensor(per_cls_weights).to(self.device)

    @cached_property
    def train_dataset(self):
        # Training transform: geometric + photometric augmentation.
        transform = aug.Compose([
            aug.SmallestMaxSize(
                max_size=self.cfg.image_size, always_apply=True),
            aug.CenterCrop(self.cfg.image_size, self.cfg.image_size,
                           always_apply=True),
            aug.Flip(p=0.5),
            aug.ImageCompression(quality_lower=10, quality_upper=80, p=0.2),
            aug.MedianBlur(p=0.3),
            aug.RandomBrightnessContrast(p=0.5),
            aug.RandomGamma(p=0.2),
            aug.GaussNoise(p=0.2),
            aug.Rotate(border_mode=cv2.BORDER_CONSTANT,
                       value=0, p=0.7, limit=45),
            aug.ToFloat(always_apply=True),
            aug_torch.ToTensorV2(),
        ])
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Training Set/Annotation/', transform, mode='train')

    # Despite the historical name "test" in the CSVs, this is the validation split.
    @cached_property
    def val_dataset(self):
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/Off-site Test Set/Annotation/', self.test_transform, mode='val')

    @cached_property
    def test_dataset(self):
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', self.test_transform, mode='test')

    # Paired-eye (left/right) prediction dataset.
    @cached_property
    def formal_test_dataset(self):
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/On-site Test Set/Annotation/', self.test_transform, mode='formal test')

    @cached_property
    def formal_test_single_dataset(self):
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/validation/', self.test_transform, mode='formal test single')

    @cached_property
    def formal_test_double_dataset(self):
        return ProgressionData('/home/zhangyichi/dataset/OIA-ODIR/validation/', self.test_transform, mode='formal test double')

    @cached_property
    def optimizer(self):
        """Adam optimizer; restores optimizer state when resuming."""
        optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.cfg.lr, weight_decay=1e-5)
        if self.cfg.load_checkpoint is not None:
            checkpoint = torch.load(self.cfg.load_checkpoint, map_location=self.device, weights_only=False)
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        return optimizer

    @cached_property
    def train_loader(self):
        """DataLoader over SMOTE-oversampled CNN features (not raw images).

        Extracts backbone features for the whole training set once, runs
        SMOTE on them, and wraps the result in a FeatureDataset so that
        ``batch`` only needs to run the classifier head.
        """
        loader = DataLoader(
            self.train_dataset,
            batch_size=self.cfg.batch_size,
            shuffle=True,
            num_workers=self.cfg.num_workers,
        )
        features_train, labels_train = extract_features(self.model.cnn, loader, self.device)
        # Bug fix: the original referenced undefined `MLSMOTE` (NameError at
        # runtime); the file imports `SMOTE`, so use it.
        # NOTE(review): plain SMOTE expects a 1-D target vector — confirm the
        # label layout if multi-label rows reach this point.
        smote = SMOTE(random_state=42)
        oversampled_features_train, oversampled_labels_train = smote.fit_resample(
            features_train.reshape(features_train.shape[0], -1), labels_train)
        # Restore the original feature shape after SMOTE's 2-D view.
        oversampled_features_train = oversampled_features_train.reshape(
            oversampled_features_train.shape[0], features_train.shape[1])
        print("过采样后各类别样本数量:")
        unique_labels, counts = np.unique(oversampled_labels_train, return_counts=True)
        print(dict(zip(unique_labels, counts)))

        oversampled_train_dataset = FeatureDataset(
            torch.tensor(oversampled_features_train, dtype=torch.float32),
            torch.tensor(oversampled_labels_train, dtype=torch.long))
        return DataLoader(oversampled_train_dataset, batch_size=self.cfg.batch_size,
                          shuffle=True, num_workers=self.cfg.num_workers)

    def batch(self, epoch, i_batch, data) -> dict:
        """Run one batch: move data to device, forward, compute loss.

        Returns a dict with ``loss``, raw ``predictions`` (logits) and
        ``labels`` for the metrics step.
        """
        imgs = data['image'].to(self.device)
        labels = data['labels'].to(self.device).float()

        # NOTE(review): assumes labels arrive with a singleton second
        # dimension, e.g. (B, 1, C) — confirm against the loader output.
        labels = labels.squeeze(1)

        # Inputs are pre-extracted backbone features (see train_loader),
        # so only the classifier head runs here.
        predictions = self.model.classifier(imgs)

        # Multi-label loss; BCEWithLogitsLoss applies sigmoid internally,
        # which is why the model head emits raw logits.
        bce_criterion = nn.BCEWithLogitsLoss()
        loss = bce_criterion(predictions, labels)

        return dict(
            loss=loss,
            predictions=predictions,
            labels=labels,
        )

    def metrics(self, epoch, data) -> dict:
        """Compute accuracy, macro precision/recall and macro AUC for the
        multi-label predictions gathered by ``batch``."""
        # Silence sklearn warnings raised when a class has no positive
        # predictions in the batch.
        warnings.filterwarnings("ignore", category=UserWarning, module="sklearn.metrics._classification")
        logits = data['predictions'].cpu().numpy()
        probabilities = torch.sigmoid(torch.tensor(logits)).numpy()

        # Binarize probabilities at a fixed threshold.
        threshold = 0.5
        predictions = (probabilities > threshold).astype(int)

        labels = data['labels'].cpu().numpy()

        # Exact-match accuracy plus macro-averaged precision/recall/AUC.
        acc = accuracy_score(labels, predictions)
        precision = precision_score(labels, predictions, average='macro')
        recall = recall_score(labels, predictions, average='macro')
        auc = roc_auc_score(labels, probabilities, average='macro')

        return dict(
            loss=float(data['loss'].mean()),
            accuracy=acc,
            precision=precision,
            recall=recall,
            auc=auc,
        )



# Script entry point: build the trainer (which loads config, model and
# optionally a checkpoint) and run the training loop from the Trainer base.
if __name__ == '__main__':
    trainer = TrainerDR()
    trainer.train()
