import sys, os, json
sys.path.append('..')

import lightning as L
import torch
import numpy as np
from models import get_model
from loss import get_loss_terms
from torchmetrics import Accuracy, AUROC
import torch.nn as nn

class ViLref(L.LightningModule):
    """Image-classification LightningModule.

    A backbone encoder (resolved via ``get_model``) followed by a linear
    classification head, with configurable optimizer, LR scheduler and loss
    terms. Logs train/val loss, accuracy and (validation only) AUROC.
    """

    def __init__(self, backbone, opti_cfg=None, scheduler_cfg=None,
                 embed_dim=768, num_class=4, batch_size=32,
                 loss_terms=None):
        """Build the model.

        Args:
            backbone: backbone spec passed to ``get_model``.
            opti_cfg: optimizer config dict; ``None`` uses
                :meth:`default_optimizer_cfg`.
            scheduler_cfg: scheduler config dict; ``None`` uses
                :meth:`default_scheduler_cfg`.
            embed_dim: feature dimension produced by the backbone.
            num_class: number of output classes.
            batch_size: nominal batch size, used for logging weights.
            loss_terms: list of loss-term config dicts; ``None`` uses
                :meth:`default_loss_terms`.
        """
        super().__init__()
        self.save_hyperparameters()
        self.backbone = get_model(backbone)
        self.head = nn.Linear(embed_dim, num_class)
        self.opti_cfg = opti_cfg if opti_cfg else self.default_optimizer_cfg()
        self.scheduler_cfg = scheduler_cfg if scheduler_cfg else self.default_scheduler_cfg()
        loss_term_cfg = loss_terms if loss_terms else self.default_loss_terms()
        self.loss_terms = get_loss_terms(loss_term_cfg)
        self.batch_size = batch_size
        # Accumulators for a test loop (presumably populated by a test_step
        # defined elsewhere — kept for compatibility; unused in this chunk).
        self.test_step_labels = []
        self.test_step_predictions = []

        # Training metric: accuracy only.
        self.train_accuracy = Accuracy(task="multiclass", num_classes=num_class)

        # Validation metrics: accuracy plus AUROC (multiclass covers the
        # binary case too via num_classes=2).
        self.val_accuracy = Accuracy(task="multiclass", num_classes=num_class)
        self.val_auc = AUROC(task="multiclass", num_classes=num_class)

    def forward(self, imgs):
        """Return classification logits for a batch of images."""
        clst_tokens = self.backbone(imgs)
        logits = self.head(clst_tokens)
        return logits

    def training_step(self, batch, batch_idx):
        """One optimization step: compute loss and update train accuracy."""
        imgs, labels_dict = batch
        labels = labels_dict['label']
        logits = self(imgs)
        loss = self.compute_loss(logits, labels)

        # Hard predictions feed the accuracy metric (accumulated per epoch).
        preds = torch.argmax(logits, dim=1)
        self.train_accuracy(preds, labels)

        # NOTE(review): self.batch_size is the configured size, not the actual
        # size of this batch — epoch averaging is slightly off on a partial
        # last batch; confirm whether labels.size(0) is intended here.
        self.log('train_loss', loss, batch_size=self.batch_size,
                 on_step=False, on_epoch=True, prog_bar=True, logger=True)

        return loss

    def on_train_epoch_end(self):
        """Log the epoch-level train accuracy and reset the metric."""
        acc = self.train_accuracy.compute()
        self.log('train_acc', acc, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.train_accuracy.reset()

    def compute_loss(self, logits, labels):
        """Sum all configured loss terms on (logits, labels)."""
        total_loss = 0.0
        for name, loss_fn in self.loss_terms.items():
            total_loss += loss_fn(logits, labels)
        return total_loss

    def validation_step(self, batch, batch_idx):
        """One validation step: compute loss, update accuracy and AUROC."""
        imgs, labels_dict = batch
        labels = labels_dict['label']
        logits = self(imgs)
        loss = self.compute_loss(logits, labels)

        # Accuracy uses hard predictions; AUROC needs class probabilities.
        preds = torch.argmax(logits, dim=1)
        probs = torch.softmax(logits, dim=1)
        self.val_accuracy(preds, labels)
        self.val_auc(probs, labels)

        self.log('val_loss', loss, batch_size=self.batch_size,
                 on_step=False, on_epoch=True, prog_bar=True, logger=True)

        return loss

    def on_validation_epoch_end(self):
        """Log epoch-level validation metrics and reset them."""
        acc = self.val_accuracy.compute()
        auc = self.val_auc.compute()
        self.log_dict({
            'val_acc': acc,
            'val_auc': auc
        }, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.val_accuracy.reset()
        self.val_auc.reset()

    def default_optimizer_cfg(self):
        """Default AdamW configuration used when no ``opti_cfg`` is given."""
        return {
            'name': 'adamw',
            'lr': 1e-3,
            'betas': (0.9, 0.999),
            'eps': 1e-8,
            'weight_decay': 1e-4,
            'amsgrad': False
        }

    def get_optimizer(self, cfg, parameters):
        """Instantiate a torch optimizer from a config dict.

        Args:
            cfg: dict with a ``name`` key plus keyword arguments for the
                chosen optimizer class.
            parameters: iterable of parameters to optimize.

        Raises:
            ValueError: if ``cfg['name']`` is not a supported optimizer.
        """
        name = cfg.get('name', 'adamw').lower()
        optimizer_mapping = {
            'adamw': torch.optim.AdamW,
            'adam': torch.optim.Adam,
            'sgd': torch.optim.SGD,
            'rmsprop': torch.optim.RMSprop,
            'adagrad': torch.optim.Adagrad
        }
        if name not in optimizer_mapping:
            raise ValueError(f"不支持的优化器：{name}")
        optimizer_cls = optimizer_mapping[name]
        # Copy so the caller's cfg is never mutated; 'name' is not a
        # constructor argument.
        optimizer_params = cfg.copy()
        optimizer_params.pop('name', None)
        # Fill optimizer-specific defaults only when absent.
        if name == 'sgd':
            optimizer_params.setdefault('momentum', 0.9)
            optimizer_params.setdefault('nesterov', False)
        elif name == 'rmsprop':
            optimizer_params.setdefault('alpha', 0.99)
            optimizer_params.setdefault('momentum', 0)
        return optimizer_cls(parameters, **optimizer_params)

    def configure_optimizers(self):
        """Lightning hook: build the optimizer and its LR scheduler."""
        # Only trainable parameters go to the optimizer (frozen backbone
        # layers are skipped).
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        optimizer = self.get_optimizer(self.opti_cfg, parameters)
        scheduler = self.get_scheduler(self.scheduler_cfg, optimizer)
        return {
            'optimizer': optimizer,
            'lr_scheduler': scheduler
        }

    def get_scheduler(self, cfg, optimizer):
        """Build a Lightning ``lr_scheduler`` config dict from ``cfg``.

        Supports ``cosine``, ``step`` and ``plateau`` schedules.

        Raises:
            ValueError: if ``cfg['name']`` is not a supported scheduler.
        """
        name = cfg.get('name', 'step')
        if name == 'cosine':
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer,
                T_max=cfg.get('decay_epochs', 50),
                eta_min=cfg.get('lr_min', self.opti_cfg['lr'] / 1e2)
            )
        elif name == 'step':
            scheduler = torch.optim.lr_scheduler.StepLR(
                optimizer,
                step_size=cfg.get('decay_epochs', 30),
                gamma=cfg.get('decay_rate', 0.1)
            )
        elif name == 'plateau':
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                optimizer,
                mode='min',
                factor=cfg.get('decay_rate', 0.1),
                patience=cfg.get('patience_epochs', 10),
                min_lr=cfg.get('lr_min', self.opti_cfg['lr'] / 1e2)
            )
        else:
            raise ValueError(f'不支持的调度器：{name}')
        sched_cfg = {
            'scheduler': scheduler,
            'interval': 'epoch'
        }
        # Only ReduceLROnPlateau needs a monitored metric; other schedulers
        # must not carry a monitor entry (previously this was set to None
        # unconditionally, which leaks a meaningless key into Lightning's
        # scheduler config).
        if name == 'plateau':
            sched_cfg['monitor'] = 'val_loss'
        return sched_cfg

    def default_loss_terms(self):
        """Default loss configuration: plain cross-entropy, weight 1.0."""
        return [
            dict(type='CrossEntropy', name='ce', avg=False, lam=1.0),
        ]

    def default_scheduler_cfg(self):
        """Default StepLR scheduler configuration.

        Many keys here (warmup_*, cooldown_*, *_iters, cycle_decay) are not
        consumed by :meth:`get_scheduler` — presumably read by an external
        training loop; kept for compatibility.
        """
        return {
            'name': 'step',
            'lr_min': self.opti_cfg['lr'] / 1e2,
            'warmup_lr': self.opti_cfg['lr'] / 1e3,
            'warmup_iters': -1,
            'cooldown_iters': 0,
            'warmup_epochs': 5,
            'cooldown_epochs': 0,
            'use_iters': True,
            'patience_iters': 0,
            'patience_epochs': 5,
            'decay_iters': 0,
            'decay_epochs': 30,
            'cycle_decay': 0.1,
            'decay_rate': 0.1
        }