import os
import sys
import torch
import datetime
import argparse
import logging
import matplotlib
matplotlib.use('Agg')

from torch import optim
from random import randint
from torch.optim.lr_scheduler import ReduceLROnPlateau

from data.create_dataset import get_dataloader
from util.config_reader import reader
from modular_tasks import MultiMAEAutoencoderModule
from torch.utils.tensorboard import SummaryWriter
from data.create_dataset import get_dataloader
from models_fix.multimae3d import create_multimae 
import matplotlib.pyplot as plt
from tqdm import tqdm

# Command-line interface for the training script.
parser = argparse.ArgumentParser()

# train
parser.add_argument('--config_path', type=str, default='./configs/train.yaml')  # YAML training config
parser.add_argument('--log_dir',type=str, default='./logs')  # base dir; a timestamped run folder is created inside
parser.add_argument('--device',type=str,default='cuda:0')  # torch device string, e.g. 'cuda:0' or 'cpu'

def create_timestamp_dir(base_path, prefix="exp"):
    """Create (if needed) and return a run directory named after the current time.

    The directory is ``<base_path>/<prefix>_<YYYYmmdd_HHMMSS>``; when *prefix*
    is falsy (e.g. ``None`` or ``""``), the bare timestamp is the folder name.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    folder_name = f"{prefix}_{stamp}" if prefix else stamp
    full_path = os.path.join(base_path, folder_name)
    os.makedirs(full_path, exist_ok=True)
    print(f"create folder: {full_path}")
    return full_path

# Parse CLI arguments at module import so `args` is available to the Trainer below.
args = parser.parse_args()


class Trainer:
    def __init__(self, args):
        """Set up logging, configuration, model, optimizer, dataloaders and task modules.

        Args:
            args: parsed CLI namespace with ``config_path``, ``device`` and ``log_dir``.
        """
        self.args = args
        self.config_path = args.config_path
        self.device = args.device
        # Every run gets its own timestamped folder under args.log_dir.
        self.log_dir = create_timestamp_dir(args.log_dir, prefix=None)

        # Log both to a file inside the run folder and to stdout.
        logging.basicConfig(filename=os.path.join(self.log_dir, "log.txt"), level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
        logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))

        self.writer = SummaryWriter(self.log_dir)
        self.cfg = reader(self.config_path)
        # Initialize hyper-parameters from the config.
        # (The misspelled duplicate `ckpt_frequzency` was removed; the correctly
        # spelled `ckpt_frequency` below is the one actually used.)
        self.num_epochs = self.cfg.training.num_epochs
        self.loss_instances = self.cfg.training.loss.loss_instances
        self.loss_weights = self.cfg.training.loss.loss_weights
        self.input_tasks = self.cfg.model.model_params.input_tasks
        self.output_tasks = self.cfg.model.model_params.output_tasks
        self.adversarial_eta = self.cfg.training.adversarial_eta
        self.check_val_every_n_epoch = self.cfg.training.check_val_every_n_epoch
        self.log_metrics_every_nth_val = self.cfg.training.log_metrics_every_nth_val
        self.log_images_every_nth_val = self.cfg.training.log_images_every_nth_val
        self.clip_grad = self.cfg.training.clip_grad
        self.clip_grad_val = self.cfg.training.clip_grad_val
        self.ckpt_frequency = self.cfg.training.ckpt_frequency

        logging.info(str(self.cfg))

        # TODO: sliding-window inference is not wired up yet.
        self.sliding_window = None

        self._get_model()
        self._get_optimizer()
        self._get_scheduler()
        self._get_task_types()
        self._get_task_loss_cfg()
        self._get_task_loss_weights()
        self._get_task_modules()
        self._get_loader()  # was called twice before; building the loaders once is enough

    def _get_optimizer(self):
        self.optimizer = optim.AdamW(self.model.parameters(),lr=0.0001, weight_decay=0.05)

    def _get_scheduler(self):
        self.scheduler =  ReduceLROnPlateau(self.optimizer,mode='min', factor=0.1, patience=10)
    
    def _get_model(self):
        """Build the MultiMAE model from the config and move it to the target device."""
        mp = self.cfg.model.model_params
        model_kwargs = dict(
            cfg=self.cfg,
            img_size=mp.img_size,
            patch_size=mp.patch_size,
            embed_dim=mp.embed_dim,
            mask_ratio=mp.mask_ratio,
            use_dirichlet=mp.use_dirichlet,
            global_img_size=mp.global_img_size,
            num_global_tokens=1,
            pos_embed_type='sincos',
            use_seg_masking=False,
            dirichlet_alpha=1.0,
            leave_one_out=False,
            apply_pos_embed_to_context=False,
        )
        self.model = create_multimae(**model_kwargs).to(self.device)
    
    def _get_task_types(self):
        task_types = {
            task : t
            for task_type in self.cfg.training.tasks.type
            for task, t in task_type.items()
        }
        task_types = {
            task: t 
            for task, t in task_types.items() if task in self.output_tasks
        }

        self.task_types = task_types
    
    def _get_task_loss_cfg(self):
        tasks = list(self.task_types.keys())
        losses = list(self.cfg.training.loss.loss_instances)
        assert len(tasks) == len(losses), "Number of tasks and losses must be the same"

        self.task_loss_cfg = dict(zip(tasks, losses))

    def _get_task_loss_weights(self):
        tasks = list(self.task_types.keys())
        loss_weights = self.cfg.training.loss.loss_weights

        assert len(tasks) == len( loss_weights), "Number of tasks and loss weights must be the same"
        loss_weights = dict(zip(tasks, loss_weights))
        
        self.task_loss_weights = loss_weights

    def _get_task_modules(self, type='MultiMAEAutoencoderModule'):
        """Create one task module per output task.

        Args:
            type: name of the task-module class to instantiate. Only
                'MultiMAEAutoencoderModule' is supported.

        Raises:
            ValueError: if *type* is unsupported. The original code silently
                left ``self.task_modules`` unset in that case, which surfaced
                later as a confusing AttributeError.
        """
        if type != 'MultiMAEAutoencoderModule':
            raise ValueError(f"Unsupported task module type: {type!r}")
        self.task_modules = {
            task: MultiMAEAutoencoderModule(
                self.cfg,
                task=task,
                loss_cfg=self.task_loss_cfg,
                model=self.model,
            )
            for task in self.task_types
        }
    
     # Collect fixed (non-learned) parameters that can be dropped from checkpoints.
    def _get_ignore_parameters(self):
        self.ignore_parameters = []
        if 'encoder_pos_embed_type' not in self.cfg.model.model_params:
            return 
        if self.cfg.model.model_params.encoder_pos_embed_type == "global_sincos":
            print(f"ignore encoder_pos_embed for checkpoints to save space")
            self.ignore_parameters.append("model.pos_embed")
        for task, decoder in self.model.output_adapters.items(): 
            if getattr(decoder, 'pos_embed_type', None) == "global_sincos": 
                for name, _ in self.named_parameters(): 
                    if all([w in name for w in ['output_adapters', 'pos_embed', f".{task}."]]): 
                        print(f"ignore decoder pos_embed for task {task}")
                        self.ignore_parameters.append(name)

    def _get_loader(self):
        """Build the train and validation dataloaders from the loader config."""
        loader_cfg = self.cfg.loader

        def build(is_train, batch_size, shuffle):
            # One dataloader per split; only the split-specific knobs differ.
            return get_dataloader(
                data_name=loader_cfg.data_name,
                config_path=loader_cfg.path,
                is_train=is_train,
                batch_size=batch_size,
                shuffle=shuffle,
                num_workers=loader_cfg.num_workers,
            )

        self.train_loader = build(True, loader_cfg.batch_size.train, loader_cfg.shuffle.train)
        self.val_loader = build(False, loader_cfg.batch_size.test, loader_cfg.shuffle.test)

    def on_save_checkpoint(self, checkpoint):
        """Strip all ignored (deterministic) parameters from the checkpoint's state dict."""
        state = checkpoint["state_dict"]
        for name in self.ignore_parameters:
            del state[name]

    # Run model inference in image space.
    def infere(self, batch):
        """Forward *batch* through the model, returning image-space outputs as a dict."""
        return self.model(
            batch,
            return_as_image=True,
            return_as_dict=True,
        )

    def _training_loss(self, outputs: dict, batch: dict) -> tuple:
        # compute total weighted loss
        loss_vals = {}
        total_loss = torch.tensor(0, dtype=torch.float32)  # .to(inputs[0][0].device)
        for task, task_module in self.task_modules.items():
            loss_val = task_module.comp_loss(outputs, batch)
            total_loss = total_loss + (self.task_loss_weights[task] * loss_val)
            loss_vals[task] = loss_val.detach()
        return total_loss, loss_vals

    # Use the fast gradient sign method (FGSM) to build adversarial examples;
    # returns the batch with each input task replaced by its perturbed version.
    def _generate_adversarial_batch(self, batch, adversarial_eta):
        """FGSM-style adversarial perturbation of every input task in *batch*.

        A forward pass computes the training loss w.r.t. grad-enabled copies of
        the inputs; the sign of each input's gradient, scaled by
        *adversarial_eta*, is added to the input (foreground only, where the
        value != -1) and the result is renormalized back into range.
        """
        for task in self.input_tasks:
            # Detached leaf tensors with grad enabled so autograd reaches the inputs.
            batch[task] = batch[task].detach().clone().requires_grad_(True)
        outputs = self.model(batch, return_as_image=False, return_as_dict=True)
        loss, _ = self._training_loss(outputs, batch)
        
        for i, task in enumerate(self.input_tasks):
            # retain_graph: the same graph is differentiated once per input task.
            grad = torch.autograd.grad(loss, batch[task],
                                        retain_graph=True)[0]
            torch.cuda.synchronize()
            task_grad = grad.detach()
            batch[task].requires_grad = False
            task_grad = torch.sign(task_grad) # we want only the sign value
            task_mask = (batch[task] != -1).float()  # only change foreground
            adv_perturbation = adversarial_eta * task_grad * task_mask
            # normalize input
            adversarial_task = (batch[task] + adv_perturbation) / (1 + adversarial_eta)
            batch[task] = adversarial_task.detach()
        return batch

    def _move_to_device(self, batch):
        if isinstance(batch, torch.Tensor):
            return batch.to(self.device)
        elif isinstance(batch, (list, tuple)):
            return [self._move_to_device(item) for item in batch]
        elif isinstance(batch, dict):
            return {key: self._move_to_device(value) for key, value in batch.items()}
        else:
            return batch

    # 执行一个 batch 的训练
    def training_step(self, batch: dict) -> dict:
        # adversarial_eta > 0.0 生成对抗样本
        if self.adversarial_eta > 0.0: 
            with torch.set_grad_enabled(True): 
                batch = self._generate_adversarial_batch(batch, self.adversarial_eta)

        outputs = self.model( batch, return_as_image=False, return_as_dict=True )  # patch space okay for training
        
        first_task = list(self.task_modules.keys())[0]
        batch_size = outputs["reconstructed_patches"][first_task].shape[0]

        # 计算训练损失 日志记录
        total_loss, loss_vals = self._training_loss(outputs, batch)
        
        return total_loss, loss_vals
    # 执行一个 epcoh
    def train_epoch(self, train_loader):
        loss_vals = None
        total_loss = torch.tensor(0, dtype=torch.float32)
        for batch_idx, batch in enumerate(train_loader):
            batch_total_loss, batch_loss_vals = self.training_step(batch)
            total_loss += batch_total_loss
            
            if loss_vals is None:
                for task, val in batch_loss_vals:
                    loss_vals[task] = val
            else:
                for task, val in batch_loss_vals:
                    loss_vals[task] += val
        return total_loss, loss_vals
    
    def train(self):
        """Run the full training loop.

        For each epoch: forward/backward over the train loader with optional
        gradient clipping, TensorBoard + file logging, LR scheduling on the
        average epoch loss, periodic validation (every
        ``check_val_every_n_epoch`` epochs) and periodic checkpointing (every
        ``ckpt_frequency`` epochs).
        """
        logging.info("Training...")
        self.model.train()
        for epoch in range(self.num_epochs):
            epoch_loss = 0.0
            epoch_loss_vals = {task: 0.0 for task in self.task_modules.keys()}
            
            pbar = tqdm(self.train_loader, desc=f'Epoch {epoch+1}/{self.num_epochs}')
            for batch_idx, batch in enumerate(pbar):
                batch = self._move_to_device(batch)
                self.optimizer.zero_grad()
                total_loss, loss_vals = self.training_step(batch)
                total_loss.backward()
                
                if self.clip_grad:
                    torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.clip_grad_val)
                
                self.optimizer.step()
                epoch_loss += total_loss.item()
                for task, loss_val in loss_vals.items():
                    epoch_loss_vals[task] += loss_val.item()
                
                pbar.set_postfix({
                    'loss': total_loss.item(),
                    'lr': self.optimizer.param_groups[0]['lr']
                })
                
            # Average the accumulated losses over the epoch and log them.
            avg_epoch_loss = epoch_loss / len(self.train_loader)
            avg_task_losses = {
                task: loss / len(self.train_loader) 
                for task, loss in epoch_loss_vals.items()
            }
            self.writer.add_scalar('train/loss', avg_epoch_loss, epoch+1)
            for task, avg_loss in avg_task_losses.items():
                self.writer.add_scalar(f'train/loss/{task}', avg_loss, epoch+1)
            
            logging.info(f'epoch: {epoch+1}, avg loss: {avg_epoch_loss}, lr: {self.optimizer.param_groups[0]["lr"]}')

            # ReduceLROnPlateau needs the monitored metric; other schedulers step unconditionally.
            if self.scheduler is not None:
                if isinstance(self.scheduler, ReduceLROnPlateau):
                    self.scheduler.step(avg_epoch_loss)
                else:
                    self.scheduler.step()
            
            self.writer.add_scalar( 'lr', self.optimizer.param_groups[0]['lr'], epoch+1)

            if (epoch + 1) % self.check_val_every_n_epoch == 0:
                self.on_validation_epoch_start()
                logging.info('validation...')
                self.model.eval() 
                self.validation(self.val_loader, epoch)
                self.model.train() 
            
            if (epoch + 1) % self.ckpt_frequency == 0:
                logging.info('save checkpoint')
                self._save_checkpoint(epoch, avg_epoch_loss)

    # 选择 固定的 batch 和 batch 中的样本便于可视化
    def on_validation_epoch_start(self) -> None:
        # self.random_batch_idx = randint(0, len(self.val_dataloaders[0]) - 1)
        # self.random_idx = randint(0, self.trainer.val_dataloaders[0].batch_size - 1)
        self.random_batch_idx = len(self.val_loader)//2
        self.random_idx = self.cfg.loader.batch_size.test//2
        print(f'random_batch_idx={self.random_batch_idx}')
        print(f'random_idx={self.random_idx}')

    # Validation pass.
    def validation(self, val_loader, epoch):
        """Evaluate on *val_loader*: accumulate per-task metrics and weighted
        losses, log them to TensorBoard at the configured intervals, and
        render a visualization of the pre-selected batch when due."""
        reconstruction_type = "reconstructed_image" if self.sliding_window else "reconstructed_patches"
        
        loss_vals = {}
        total_metrics = {} 
        total_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
        
        viz_batch = None
        viz_inputs = None
        viz_outputs = None
    
        self.model.eval()
        with torch.no_grad(): 
            for batch_idx, batch in tqdm(enumerate(val_loader)):
                batch = self._move_to_device(batch)
                
                outputs = self.infere(batch)  # val metrics are calculated in image space
            
                for task, task_module in self.task_modules.items():
                    task_metrics = task_module.comp_metrics(outputs, batch, reconstruction_type)
                    
                    for metric_name, metric_value in task_metrics.items():
                        key = f"{metric_name}/{task}"
                        if key not in total_metrics:
                            total_metrics[key] = []
                        total_metrics[key].append(metric_value)

                # Accumulate the weighted validation loss for this batch.
                batch_total_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
                for task, task_module in self.task_modules.items():
                    loss_val = task_module.comp_loss(outputs, batch)
                    weighted_loss = loss_val * self.task_loss_weights[task]
                    batch_total_loss += weighted_loss

                    if task in loss_vals:
                        loss_vals[task].append(loss_val)
                    else:
                        loss_vals[task] = [loss_val]
                
                total_loss += batch_total_loss
            
                # Keep the pre-selected batch around for visualization below.
                if batch_idx == self.random_batch_idx:
                    viz_batch = batch
                    viz_inputs = outputs['selected_patches']
                    viz_outputs = outputs[reconstruction_type] 

            num_batches = len(val_loader)
            avg_total_loss = total_loss / num_batches if num_batches > 0 else total_loss
            # log losses
            if (epoch+1)%(self.check_val_every_n_epoch)==0:
                self.writer.add_scalar('val/loss/total', avg_total_loss, epoch+1)
                for task, loss_list in loss_vals.items():
                    if loss_list:
                        avg_loss = torch.stack(loss_list).mean()
                        self.writer.add_scalar(f'val/loss/{task}', avg_loss, epoch+1)

            # log metrics at the coarser metric interval
            if (epoch+1)%(self.check_val_every_n_epoch*self.log_metrics_every_nth_val)==0:
                for metric_key, metric_values in total_metrics.items():
                    if metric_values:
                        avg_metric = torch.stack(metric_values).mean()
                        self.writer.add_scalar(f'val/metrics/{metric_key}', avg_metric, epoch+1)
            # log images at the coarser image interval
            if (epoch+1)%(self.check_val_every_n_epoch*self.log_images_every_nth_val)==0:
                if viz_batch is not None and viz_inputs is not None and viz_outputs is not None:
                    fig = self.visualize(viz_batch, viz_inputs, viz_outputs)
                    self.writer.add_figure('val/visualization', fig, epoch+1)
                    plt.close(fig)
        
    def visualize(self, batch, inputs, outputs):
        """Build a matplotlib figure comparing ground truth, masked input and
        reconstruction for every input task and visualizable output task.

        Returns:
            The assembled matplotlib Figure (caller is responsible for closing it).
        """
        # (A dead first computation of all_tasks, immediately overwritten, was removed.)
        all_visualizable_output_tasks = [
            task for task, module in self.task_modules.items() if module.has_visualization
        ]
        all_tasks = list(self.input_tasks) + [
            task for task in all_visualizable_output_tasks if task not in self.input_tasks
        ]

        n_tasks = len(all_tasks)
        fig, axs = plt.subplots(n_tasks, 3, figsize=(5 * 3, 5 * n_tasks), squeeze=False)

        # Clamp the pre-selected sample index to the actual batch size.
        random_idx = self.random_idx % inputs[all_tasks[0]].shape[0]

        for task_idx, task in enumerate(all_tasks):
            if task in batch and not task in self.task_modules:
                batch_ax = axs[task_idx, 0]
                input_ax = axs[task_idx, 1]
                # Show the central slice of the volume's last axis.
                gt_slice = batch[task].shape[-1] // 2
                batch_ax.imshow(
                    batch[task][random_idx, 0, ..., gt_slice].detach().cpu(),
                    cmap="viridis",
                    vmin=-1,
                    vmax=1,
                )
                batch_ax.set_title(f"ground truth - {task}")

                in_slice = inputs[task].shape[-1] // 2
                input_ax.imshow(
                    inputs[task][random_idx, 0, ..., in_slice].detach().cpu(),
                    cmap="viridis",
                    vmin=-1,
                    vmax=1,
                )
                # Bug fix: this title previously went to batch_ax, overwriting
                # the ground-truth title and leaving the input panel untitled.
                input_ax.set_title(f"input - {task}")
            else:
                axs[task_idx, :] = self.task_modules[task].visualize(
                    batch, inputs, outputs, self.random_idx, axs[task_idx]
                )

        for ax_row in axs:
            for ax in ax_row:
                ax.axis('off')

        return fig

    def _save_checkpoint(self, epoch,epoch_loss):
        """保存模型检查点"""
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict() if self.scheduler else None,
            'loss': epoch_loss,
        }
        
        # 创建检查点目录（如果不存在）
        checkpoint_dir = os.path.join(self.log_dir, 'checkpoints')
        os.makedirs(checkpoint_dir, exist_ok=True)  # 关键：创建目录
        
        checkpoint_path = os.path.join(checkpoint_dir, f'checkpoint_epoch_{epoch+1}.pth')
        torch.save(checkpoint, checkpoint_path)
        print(f"检查点已保存: {checkpoint_path}")

    def _load_checkpoint(self, checkpoint_path):
        """加载模型检查点"""
        checkpoint = torch.load(checkpoint_path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if self.scheduler and checkpoint['scheduler_state_dict']:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        print(f"从 epoch {checkpoint['epoch']} 加载检查点")

if __name__ == '__main__':
    # Entry point: build the trainer from the parsed CLI args and run the full loop.
    trainer = Trainer(args)
    trainer.train()






