import torch
import pytorch_lightning as pl
from monai.transforms import AsDiscrete
from monai.data import decollate_batch
from models import MODELS
from utils.losses import LOSSES
from utils.metrics import METRICS

class LitMonai2DModel(pl.LightningModule):
    """Lightning wrapper for a registry-built 2D MONAI segmentation model.

    Expects ``config`` to contain ``'model'``, ``'loss'`` and ``'metric'``
    sections, each with a ``'*_type'`` registry key and an optional
    ``'*_args'`` kwargs dict. ``config['model']`` must also provide
    ``'learning_rate'`` and ``model_args['out_channels']``.
    """

    def __init__(self, config):
        super().__init__()
        # Persist the full config so checkpoints are self-describing.
        self.save_hyperparameters()
        self.config = config

        # Build the model via the registry.
        model_class = MODELS.get(config['model']['model_type'])
        model_args = config['model'].get('model_args', {})
        self._model = model_class(**model_args)

        # Build the loss function via the registry.
        loss_class = LOSSES.get(config['loss']['loss_type'])
        loss_args = config['loss'].get('loss_args', {})
        self.loss_function = loss_class(**loss_args)

        # Build the evaluation metric via the registry.
        metric_class = METRICS.get(config['metric']['metric_type'])
        metric_args = config['metric'].get('metric_args', {})
        self.dice_metric = metric_class(**metric_args)

        # Post-processing for metric computation: one-hot the argmax of the
        # prediction, and one-hot the integer label, so both match in shape.
        # Reuse the already-fetched `model_args` instead of re-indexing the
        # config (the original re-index assumed 'model_args' always exists,
        # contradicting the defensive .get() above).
        out_channels = model_args['out_channels']
        self.post_pred = AsDiscrete(argmax=True, to_onehot=out_channels)
        self.post_label = AsDiscrete(to_onehot=out_channels)

    def forward(self, x):
        """Run the wrapped model on a batch of images."""
        return self._model(x)

    def training_step(self, batch, batch_idx):
        """Compute and log the epoch-averaged training loss."""
        images, labels = batch["image"], batch["label"]
        # Call via __call__ (not .forward) so nn.Module hooks still fire.
        outputs = self(images)
        loss = self.loss_function(outputs, labels)
        self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Log validation loss and accumulate the Dice metric for the epoch."""
        images, labels = batch["image"], batch["label"]
        outputs = self(images)
        loss = self.loss_function(outputs, labels)
        self.log("val_loss", loss, prog_bar=True)

        # Decollate the batch into per-sample tensors, one-hot encode, and
        # accumulate into the metric (aggregated at epoch end).
        outputs = [self.post_pred(i) for i in decollate_batch(outputs)]
        labels = [self.post_label(i) for i in decollate_batch(labels)]
        self.dice_metric(y_pred=outputs, y=labels)

    def on_validation_epoch_end(self):
        """Aggregate the accumulated Dice score, log it, and reset the metric."""
        mean_dice = self.dice_metric.aggregate().item()
        self.dice_metric.reset()
        self.log("val_mean_dice", mean_dice, prog_bar=True)

    def configure_optimizers(self):
        """Create an Adam optimizer with the configured learning rate."""
        return torch.optim.Adam(self.parameters(), lr=self.config['model']['learning_rate'])