import os
import inspect

import lightning as L
import torch
import torch.nn as nn
import torchvision
from lightning.pytorch.loggers import CSVLogger

from lib.lr_scheduler import get_lr_scheduler
from lib.metrics import cal_params_flops, get_test_binary_metrics, get_train_valid_metrics, update_miou_metrics, \
    update_other_binary_metrics
from lib.optimizer import get_optimizer


class SimpleImageSegmentationModel(L.LightningModule):
    """Generic Lightning wrapper for a binary image-segmentation network.

    Wraps an arbitrary ``net`` and handles the train/val/test loops, metric
    bookkeeping (MeanIoU plus extra binary metrics at test time), prediction
    image export, and optimizer/LR-scheduler wiring driven by ``opt``.

    The wrapped net may optionally:
      * expose ``has_final_normalization = True`` to signal that its output is
        already in [0, 1] (skips the sigmoid applied here);
      * implement its own ``training_step`` / ``validation_step`` /
        ``test_step`` to override the default step logic.
    """

    def __init__(self, net, opt, loss_func):
        """
        Args:
            net: the segmentation network (an ``nn.Module``).
            opt: options/config object; fields used here include
                ``result_dir``, ``forbid_metrics``, ``resize_shape`` and
                ``lr_scheduler_config``.
            loss_func: callable ``loss_func(output, target) -> Tensor``.
        """
        super().__init__()
        self.net = net
        self.loss_func = loss_func
        # Default output normalization (binary segmentation -> sigmoid).
        self.normalization = nn.Sigmoid()
        self.opt = opt
        metrics = get_train_valid_metrics()
        self.train_metrics = metrics.clone(prefix='train/')
        self.valid_metrics = metrics.clone(prefix='val/')
        self.test_metrics = get_test_binary_metrics().clone(prefix='test/')
        self.test_base_metrics = metrics.clone(prefix='test/')
        # Per-epoch loss buffers; cleared in the on_*_epoch_start hooks.
        self.train_losses = []
        self.valid_losses = []

    def forward(self, x):
        return self.net(x)

    def _maybe_normalize(self, output):
        """Apply sigmoid unless the net declares its output already normalized.

        FIX: the original probed ``self.net.has_final_normalization`` inside a
        bare ``except:`` that swallowed *every* exception (including
        KeyboardInterrupt); ``getattr`` with a default is both safer and clearer.
        """
        if getattr(self.net, 'has_final_normalization', False):
            return output
        return self.normalization(output)

    def _on_step(self, batch, batch_idx, metrics):
        """Shared default train/validation step: forward, metrics, loss."""
        input_tensor, target, _ = batch
        output = self._maybe_normalize(self(input_tensor))
        update_miou_metrics(metrics, output, target)
        loss = self.loss_func(output, target)
        return loss

    def on_train_epoch_start(self):
        self.train_metrics.reset()
        self.train_losses.clear()

    def training_step(self, batch, batch_idx):
        # Delegate to the net's own training_step when it provides a real one.
        has_step = self._has_custom_method(self.net, "training_step")

        if has_step:
            loss = self.net.training_step(batch, batch_idx,
                                          train_metrics=self.train_metrics,
                                          normalization=self.normalization)
        else:
            loss = self._on_step(batch, batch_idx, self.train_metrics)
        self.train_losses.append(loss.item())
        return loss

    def _has_custom_method(self, obj, method_name):
        """Heuristically check whether ``obj`` provides a *meaningful*
        implementation of ``method_name`` (not just ``pass``/docstring stubs).

        Uses :func:`inspect.getsource` and filters out trivial lines.

        NOTE(review): this heuristic is fragile — a body consisting solely of
        ``return delegate(...)`` lines is filtered out and thus counted as "not
        implemented", and only the *first* line of a multi-line docstring is
        skipped. Behavior is preserved here deliberately, since downstream nets
        may depend on it; confirm before tightening.
        """
        if not hasattr(obj, method_name):
            return False

        method = getattr(obj, method_name)
        if not callable(method):
            return False

        try:
            source = inspect.getsource(method)
            source_lines = [line.strip() for line in source.split('\n') if line.strip()]
            # Drop comments, bare pass/return statements and docstring openers.
            meaningful_lines = [line for line in source_lines
                              if line and not line.startswith('#')
                              and line != 'pass'
                              and not line.startswith('"""')
                              and not line.startswith("'''")
                              and not line.startswith('return')]
            return len(meaningful_lines) > 0
        except (OSError, TypeError):
            # Source unavailable (e.g. C extension): fall back to comparing the
            # bytecode against an empty body ("return None" on CPython 3.6+).
            return hasattr(method, '__code__') and method.__code__.co_code != b'd\x00S\x00'

    def on_train_epoch_end(self):
        dic = self.train_metrics.compute()
        self.log('train_MIOU', dic['train/MeanIoU'], prog_bar=True, on_epoch=True, sync_dist=True)
        self.log('train_loss', torch.tensor(self.train_losses).mean(), prog_bar=True, on_epoch=True, sync_dist=True)

    def on_validation_epoch_start(self):
        self.valid_metrics.reset()
        self.valid_losses.clear()

    def validation_step(self, batch, batch_idx):
        # Delegate to the net's own validation_step when it provides a real one.
        has_step = self._has_custom_method(self.net, "validation_step")

        if has_step:
            loss = self.net.validation_step(batch, batch_idx, valid_metrics=self.valid_metrics,
                                            normalization=self.normalization)
        else:
            loss = self._on_step(batch, batch_idx, self.valid_metrics)
        self.valid_losses.append(loss.item())
        return loss

    def on_validation_epoch_end(self):
        dic = self.valid_metrics.compute()
        self.log('val_MIOU', dic['val/MeanIoU'], prog_bar=True, on_epoch=True, sync_dist=True)
        self.log('val_loss', torch.tensor(self.valid_losses).mean(), prog_bar=True, on_epoch=True, sync_dist=True)

    def test_step(self, batch, batch_idx):
        """Run inference on a test batch, update metrics and save predictions."""
        # Delegate to the net's own test_step when it provides a real one.
        has_step = self._has_custom_method(self.net, "test_step")

        input_tensor, target, image_names = batch
        if has_step:
            output = self.net.test_step(batch, batch_idx)
        else:
            output = self(input_tensor)
        output = self._maybe_normalize(output)
        predict = output
        if len(output.shape) == 4:
            if output.shape[1] != 1:
                # Multi-channel logits/probabilities -> class index map.
                predict = torch.argmax(output, dim=1)
            else:
                # Single-channel: (B, 1, H, W) -> (B, H, W).
                predict = output.squeeze(1)
        if not self.opt.forbid_metrics:
            update_other_binary_metrics(self.test_metrics, output, target)
            update_miou_metrics(self.test_base_metrics, output, target)

        # FIX: makedirs is loop-invariant; hoisted out of the per-image loop.
        os.makedirs(self.opt.result_dir, exist_ok=True)
        for i in range(predict.size(0)):
            torchvision.utils.save_image(
                predict[i].float(),
                os.path.join(
                    self.opt.result_dir,
                    f'{image_names[i]}.png'
                )
            )

    def on_test_epoch_end(self):
        """Compute final test metrics, log them and dump them to a CSV file."""
        if self.opt.forbid_metrics:
            return
        params, flops = cal_params_flops(self.net, self.device, self.opt.resize_shape)
        logger = CSVLogger(self.opt.result_dir, name='result', version="")
        dic = self.test_metrics.compute()
        dic.update(self.test_base_metrics.compute())
        self.log_dict(dic, prog_bar=True, on_epoch=True, sync_dist=True)
        # Params/FLOPs go to the CSV only (after log_dict, so Lightning never
        # tries to reduce them across devices).
        dic['Parameters'] = params
        dic['FLOPs'] = flops
        logger.log_metrics(dic)
        logger.save()
        self.test_metrics.reset()
        # FIX: the base (MIoU) test metrics were never reset, so a second test
        # run would accumulate state from the previous one.
        self.test_base_metrics.reset()

    def configure_optimizers(self):
        """Build the optimizer and (optionally) the LR scheduler from ``opt``."""
        optimizer = get_optimizer(self.opt, self.net)
        lr_schedulers = get_lr_scheduler(optimizer, self.opt)

        if lr_schedulers is None:
            return {
                "optimizer": optimizer,
            }

        # Extra Lightning scheduler options (interval, monitor, ...) may be
        # supplied via opt.lr_scheduler_config.lightning_config.
        lr_scheduler_lightning_config = self.opt.lr_scheduler_config.lightning_config
        lr_scheduler_lightning_config = lr_scheduler_lightning_config.get_dict() if lr_scheduler_lightning_config is not None else {}

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": lr_schedulers,
                **lr_scheduler_lightning_config,
            }
        }
