import torch
import tqdm
import yaml
import os
import random
import numpy as np
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Literal
from threading import Thread
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.amp import GradScaler
from tensordict import TensorDict
from ciallo_trainer import local_rank, world_size, distributed
import nvtx

from .logger import logger
from .logger import LogSystem, TensorboardLogger, SwanlabLogger
from .module import Module
from .colorful import Color
from .util import get_free_port, copy_local_modules
from .utils.timing import get_average_time

def find_field_value(config_dict, field_name, default=None):
    """
    Recursively search a (possibly nested) configuration structure for a field.

    Args:
        config_dict: A dict, or an object whose ``__dict__`` is inspected,
            possibly containing further nested dicts/objects.
        field_name: The key / attribute name to look for.
        default: Value returned when the field is not found.

    Returns:
        The first value found for ``field_name`` (depth-first, insertion
        order), or ``default`` if absent. Note: a stored value of ``None``
        is indistinguishable from "not found" and falls back to ``default``.
    """
    def _search(obj, key):
        """Depth-first search over nested dicts and object attribute dicts."""
        # Objects are inspected through their attribute dict.
        if hasattr(obj, '__dict__'):
            obj = obj.__dict__

        if isinstance(obj, dict):
            # Direct hit at this level.
            if key in obj:
                return obj[key]

            # Recurse into nested dicts AND objects. (Bug fix: the original
            # condition `isinstance(value, (dict, object)) and
            # hasattr(value, '__dict__')` never matched plain dicts, because
            # dict instances have no __dict__, so nested dicts were skipped.)
            for value in obj.values():
                if isinstance(value, dict) or hasattr(value, '__dict__'):
                    result = _search(value, key)
                    if result is not None:
                        return result

        return None

    result = _search(config_dict, field_name)
    return result if result is not None else default


def get_config_value(config, field_name):
    """
    Fetch a configuration value by name.

    Resolution order:
    1. a direct attribute on ``config`` (object-style configs);
    2. a direct key when ``config`` is a dict;
    3. a recursive search through nested structures via ``find_field_value``.

    Returns ``None`` when the field cannot be found anywhere.
    """
    # Object-style access first (EAFP: hasattr is just getattr + except).
    try:
        return getattr(config, field_name)
    except AttributeError:
        pass

    # Dict-style direct lookup.
    if isinstance(config, dict):
        try:
            return config[field_name]
        except KeyError:
            pass

    # Fall back to a deep search of nested dicts/objects.
    return find_field_value(config, field_name)
    
@dataclass
class TrainerConfig:
    """Hyper-parameters and bookkeeping options consumed by :class:`Trainer`."""

    # --- log system ---
    log_dir: str = 'logs'
    project_name: str = 'tmp'
    experiment_name: str = 'tmp'
    log_type: Literal['tensorboard', 'Swanlab'] = 'tensorboard'

    max_epochs: int = int(1e9)
    log_interval: int = 100  # log training loss every `log_interval` steps

    seed: int = 20000227

    eval_interval: int = 1  # run validation every `eval_interval` epochs

    save_type: Literal["latest", "best", "interval", "topk"] = "latest"
    save_interval: int = 1  # if save_type is 'interval', save every `save_interval` epochs
    save_topk: int = 5  # if save_type is 'topk', number of top models to keep

    use_amp: bool = True  # whether to use automatic mixed precision training
    clip_grad_norm: float = 1.0  # max gradient norm for clipping (values <= 0 disable clipping)
    gradient_accumulation_steps: int = 1  # number of gradient accumulation steps

    def __post_init__(self):
        # Validate ranges early so a bad config fails at construction time.
        # (Bug fix: the original message referred to 'num_template_frames',
        # a copy-paste from an unrelated config field.)
        assert self.max_epochs >= 1, f"max_epochs must be at least 1, but got {self.max_epochs}"
        assert self.gradient_accumulation_steps > 0, f"gradient_accumulation_steps must be greater than 0, but got {self.gradient_accumulation_steps}"

class Trainer:
    """Epoch-based training driver with AMP, optional DDP, and run logging.

    Responsibilities: drive the train/validate loop for a `Module`, manage the
    AMP ``GradScaler``, wrap the model in ``DistributedDataParallel`` when
    running distributed, save/load checkpoints, and stream metrics to a
    TensorBoard or Swanlab logger. Rank 0 performs all logging, validation
    and checkpoint I/O; other ranks only train.
    """
    def __init__(self, config: TrainerConfig):
        self.config = config
        # Process topology comes from module-level globals imported from
        # ciallo_trainer (set up by the launcher before this file is imported).
        self.local_rank = local_rank
        self.world_size = world_size
        self.distributed = distributed
        self.device = torch.device(f'cuda:{self.local_rank}')
    
        self.scaler = GradScaler(enabled=self.config.use_amp)
        self.epochs = config.max_epochs
        
        # Blocking, timing-enabled CUDA event used to chain successive
        # training steps on the current stream (see _train_one_epoch).
        self.training_event = torch.cuda.Event(enable_timing=True, blocking=True)

        if self.config.log_type == 'tensorboard':
            self._logger = TensorboardLogger(config.log_dir,project_name=config.project_name, experiment_name=config.experiment_name, local_rank=self.local_rank)
            import subprocess
            if self.local_rank == 0:
                output_path = os.path.join(self._logger._get_log_dir(), 'output.txt')
                import sys
                # Redirect stdout into the run directory so console output is
                # kept alongside the experiment artifacts for this run.
                sys.stdout = open(os.path.join(self._logger._get_log_dir(), 'output_info.txt'), 'w')
                if logger is not None:
                    import logging

                    # Mirror logger records into output.txt inside the run dir.
                    file_handler = logging.FileHandler(output_path)
                    file_handler.setLevel(logging.DEBUG)
                    
                    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
                    file_handler.setFormatter(formatter)
                    logger.addHandler(file_handler)
                    
                # Spawn a TensorBoard server for this run on a free port; the
                # Popen handle is killed in __del__.
                port = get_free_port()
                command = ["tensorboard", "--logdir", self._logger._get_log_dir(), "--bind_all", "--port", str(port)]
                self.tensorboard_display = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                
                logger.info(f"Tensorboard is running at {Color.Green}http://localhost:{port}" + Color.End)
                logger.info("you can also use the command to start tensorboard: tensorboard --logdir xxx")
        elif self.config.log_type == 'Swanlab':
            self._logger = SwanlabLogger(config.log_dir,project_name=config.project_name, experiment_name=config.experiment_name, local_rank=self.local_rank)

    
    def get_expriment_dir(self):
        """Return the logger's run/experiment directory.

        NOTE(review): the 'expriment' typo is part of the public API; renaming
        it would break external callers.
        """
        return self._logger._get_log_dir()
    
    def save_checkpoint(self, model:Module, epoch:int, optimizer:torch.optim.Optimizer, scheduler:torch.optim.lr_scheduler._LRScheduler):
        """
        Save model/optimizer/scheduler state dicts plus the epoch number.

        Only rank 0 writes; other ranks return immediately (returning None).
        Returns the checkpoint file path on rank 0.
        """
        if self.local_rank != 0:
            return
        state = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict()
        }
        path = os.path.join(self.get_expriment_dir(), f"checkpoint_{epoch}.pth")
        torch.save(state, path)
        logger.info(f"Checkpoint saved at {Color.Green} {os.path.join(self.get_expriment_dir(), f'checkpoint_{epoch}.pth')}" + Color.End)
        
        return path
        
    def load_checkpoint(self, model:Module, optimizer:torch.optim.Optimizer, scheduler:torch.optim.lr_scheduler._LRScheduler, checkpoint_path:str=None):
        """
        Restore model/optimizer/scheduler state from ``checkpoint_path``.

        Returns the epoch recorded in the checkpoint, or 0 when no path is
        given (training starts from scratch).
        """
        if checkpoint_path is None:
            return 0
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.info(f"Checkpoint loaded from {Color.Green} {checkpoint_path}" + Color.End)
        return checkpoint['epoch']
    
    def _prepare_for_reproducibility(self, model:Module, check_exportable:bool=True, checkpoint_path:str=None):
        """Rank-0 pre-flight: exportability check, source snapshot, config log.

        Also looks up 'batch_size' from the combined model/trainer configs and
        stores it as ``self.batch_size`` (consumed by _execute_training_step
        for logger step accounting). NOTE(review): ``self.batch_size`` is only
        assigned on rank 0; confirm non-zero ranks never read it.
        """
        if self.local_rank == 0:
            check_exportable = False if checkpoint_path is not None else check_exportable # if checkpoint_path is not None, then we don't need to check exportable

            if check_exportable:
                model.check_exportable()
            else:
                logger.info(Color.Yellow + "Warning: model is not checked for exportable" + Color.End)

            copy_local_modules(self.get_expriment_dir(), stack_index=2) # stack_index=1 means we are not copying the current file but the file that called this `train` function. Since we are in the `_prepare_for_reproducibility` function, we need to copy the file that called this function, which is 2 levels up in the stack.
            
            configs = {}
            if model.config is not None:
                configs['model'] = model.config
            
            configs['trainer'] = self.config
            
            # May be None when neither config declares 'batch_size'.
            self.batch_size = get_config_value(configs, 'batch_size')

            self._logger.config(configs)
    
    def train(self, train_dataloader: torch.utils.data.DataLoader, val_dataloader: torch.utils.data.DataLoader, model:Module, check_exportable:bool=True, checkpoint_path:str=None):
        """Run the full training loop: per-epoch train, periodic validation
        (rank 0 only), and periodic checkpointing. Optionally resumes from
        ``checkpoint_path``. KeyboardInterrupt triggers a best-effort save.
        """

        self._prepare_for_reproducibility(model=model, checkpoint_path=checkpoint_path, check_exportable=check_exportable)

        self.model = model.cuda()

        self.optimizer, self.lr_scheduler = model.configure_optimizers()
        
        if self.distributed:
            # Build DDP on a side stream, then make the current stream wait —
            # keeps DDP's initial bucket allocation off the default stream.
            s = torch.cuda.Stream()
            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                self.model_ddp = DDP(self.model, device_ids=[self.device])
            torch.cuda.current_stream().wait_stream(s)
            
            if isinstance(train_dataloader, torch.utils.data.DataLoader):
                # change the sampler of the dataloader to DistributedSampler
                # (rebuilds the loader with the same settings; any custom
                # sampler/shuffle flag on the original loader is discarded).
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataloader.dataset, 
                    batch_size=train_dataloader.batch_size, 
                    sampler=torch.utils.data.DistributedSampler(train_dataloader.dataset),
                    num_workers=train_dataloader.num_workers,
                    pin_memory=train_dataloader.pin_memory,
                    drop_last=train_dataloader.drop_last,
                    persistent_workers=train_dataloader.persistent_workers,
                    collate_fn=train_dataloader.collate_fn
                )

        def lr_step():
            # Callback handed to the module at epoch end; the module decides
            # when/whether to advance the LR schedule.
            self.lr_scheduler.step()
            self._logger.log({'lr': self.lr_scheduler.get_last_lr()[0]})

        self.model.on_train_start()
        
        start_epoch = 0
        try:
            start_epoch = self.load_checkpoint(model=self.model, optimizer=self.optimizer, scheduler=self.lr_scheduler, checkpoint_path=checkpoint_path)
        except:
            # NOTE(review): bare except — a corrupted/mismatched checkpoint is
            # silently downgraded to a warning and training restarts at 0.
            if self.local_rank == 0:
                logger.info(Color.Yellow + "Warning: failed to load checkpoint" + Color.End)
        try:
            for epoch in range(start_epoch, self.epochs):
                if self.distributed and isinstance(train_dataloader, torch.utils.data.DataLoader):
                    # Re-seed the DistributedSampler shuffle for this epoch.
                    train_dataloader.sampler.set_epoch(epoch)
                    
                self.model.on_train_epoch_start(epoch)

                self._train_one_epoch(train_dataloader=train_dataloader, epoch=epoch)
                    
                self.model.on_train_epoch_end(lr_step)

                if epoch % self.config.eval_interval == 0:
                    if self.local_rank == 0:
                        self.validate(val_dataloader, model=self.model, epoch=epoch)
                    
                    if self.distributed:
                        # Keep other ranks parked while rank 0 validates.
                        torch.distributed.barrier() 

                # NOTE(review): save_interval is applied regardless of
                # config.save_type ('latest'/'best'/'topk' are not implemented
                # here) — confirm intended.
                if self.local_rank == 0 and epoch % self.config.save_interval == 0:
                    self.save_checkpoint(model=self.model, epoch=epoch, optimizer=self.optimizer, scheduler=self.lr_scheduler)

            self.model.on_train_end()
        except KeyboardInterrupt as e:
            # Best-effort save on Ctrl-C, then re-raise so the process exits.
            if self.local_rank == 0:
                logger.info(Color.Red + "Training interrupted by user, try to save the model" + Color.End)
                self.save_checkpoint(model=self.model, epoch=epoch, optimizer=self.optimizer, scheduler=self.lr_scheduler)
            torch.distributed.barrier()
            raise e
        except Exception as e:
            # NOTE(review): only rank 0 re-raises; other ranks swallow the
            # exception and return, which can leave a distributed job hung.
            if self.local_rank == 0:
                logger.info(Color.Red + f"Training failed: {e}" + Color.End)
                raise e
    def _train_one_epoch(self, train_dataloader: torch.utils.data.DataLoader, epoch: int):
        """Train over one pass of ``train_dataloader``.

        NOTE(review): batches are passed to training_step as-is — _to_device
        is never called here; presumably the module/loader handles device
        placement. TODO confirm.
        """
        self.model.train()
        self.model.bind_log(self._logger)

        loop = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader), desc=f'Epoch [{epoch}/{self.epochs}]', disable=self.local_rank != 0)

        # NOTE(review): assigned but never used below.
        is_pytorch_loader = isinstance(train_dataloader, torch.utils.data.DataLoader)
        
        state_average_time = get_average_time()
        for batch_idx, batch in loop:
            if len(state_average_time) > 0:
                # Surface data-loading timing stats on the progress bar.
                loop.set_postfix(load=state_average_time)
            # Make the current stream wait for the previous step's recorded
            # event before starting this step's GPU work.
            self.training_event.wait(torch.cuda.current_stream())
            
            # NOTE(review): zero_grad runs EVERY iteration, which discards
            # gradients from non-step iterations and defeats
            # gradient_accumulation_steps > 1 — confirm intended.
            self.optimizer.zero_grad(set_to_none=True)
            
            try:
                self._execute_training_step(batch, batch_idx, log_step=self.local_rank == 0 and batch_idx % self.config.log_interval == 0)
            except Exception as e:
                # NOTE(review): any failing batch is logged and skipped; a
                # persistent error will loop forever without surfacing.
                logger.info(f"{Color.Red}Error: {e}" + Color.End)
                continue
            
            # normal optimizer step
            if batch_idx % self.config.gradient_accumulation_steps == 0 or batch_idx == len(train_dataloader) - 1:
                self.scaler.step(self.optimizer)
                self.scaler.update()
            # Record the event so the next iteration waits on this step's work.
            self.training_event.record(torch.cuda.current_stream())
            
    def check_gradients(self, model):
        """Return per-parameter gradient statistics (mean/std/min/max and
        NaN/Inf flags) for every parameter that currently has a gradient.
        Debug helper; not called in the training loop by default."""
        grad_stats = {}
        
        for name, param in model.named_parameters():
            if param.grad is not None:
                grad = param.grad
                grad_stats[name] = {
                    'mean': grad.mean().item(),
                    'std': grad.std().item(),
                    'min': grad.min().item(),
                    'max': grad.max().item(),
                    'has_nan': torch.isnan(grad).any().item(),
                    'has_inf': torch.isinf(grad).any().item()
                }
        
        return grad_stats
    
    def _execute_training_step(self, batch, batch_idx, log_step=False):
        """Forward + scaled backward for one batch under autocast (bf16 when
        AMP is enabled); optionally logs the detached loss bundle."""
        # normal execution
        with torch.autocast(self.device.type, enabled=self.config.use_amp,dtype=torch.bfloat16):    
            training_loss = self.model.training_step(batch, batch_idx) 
            if torch.isnan(training_loss.losses[0]).any():
                # self.check_gradients(self.model)
                logger.info(f"{Color.Red}Warning: training loss is nan{Color.End}")
            # NaN/Inf losses are flushed to 0 so the step becomes a no-op
            # instead of poisoning the weights.
            loss = torch.nan_to_num(training_loss.losses[0], nan=0.0, posinf=0.0, neginf=0.0) / self.config.gradient_accumulation_steps
            self.scaler.scale(loss).backward()
            # NOTE(review): unscale_ + clipping sit inside the autocast block;
            # gradient ops don't need autocast and conventionally go outside.
            if self.config.clip_grad_norm > 0:
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip_grad_norm)
        
        if log_step :
            self.model.log_step(training_loss.detach())
            # Advance the logger's global step by the samples processed since
            # the last log. NOTE(review): relies on self.batch_size set in
            # _prepare_for_reproducibility (rank 0 only) and may be None when
            # no 'batch_size' exists in the configs — confirm.
            self._logger.set_step(self._logger._steps + self.batch_size * world_size * self.config.log_interval)
            
    def _to_device(self, batch):
        """Move a batch (dict / list / tuple / TensorDict) onto self.device.

        Tuples may mix tensors and non-tensors; non-tensor entries are kept
        as-is. Raises ValueError for unsupported batch containers.
        """
        if isinstance(batch, dict):
            batch = {k: v.to(self.device) for k, v in batch.items()}
        elif isinstance(batch, list):
            batch = [b.to(self.device) for b in batch]
        elif isinstance(batch, tuple):
            batch = tuple([ v.to(self.device) if isinstance(v, torch.Tensor) else v for  v in batch])
        elif isinstance(batch, TensorDict):
            batch = batch.to(self.device)
        else:
            raise ValueError(f"Unknown type {type(batch)}")
        return batch
                    
    def validate(self, test_loader: torch.utils.data.DataLoader, model:Module, epoch: int):
        """Run one validation pass under no_grad on rank 0.

        NOTE(review): the rank check happens AFTER the model is switched to
        eval mode and moved to device, so non-zero ranks that call this still
        mutate model state before returning — confirm intended.
        """
        
        if test_loader is None:
            logger.info(Color.Yellow + "Warning: no validation set" + Color.End)
            return
        self.model = model
        self.model.eval()
        self.model.to(self.device)
        self.model.bind_log(self._logger)

        if self.local_rank != 0:
            return
        
        with torch.no_grad():
            self.model.on_validation_start()

            loop = tqdm.tqdm(enumerate(test_loader), total = len(test_loader))
            for batch_idx, batch in loop:
                batch = self._to_device(batch)

                self.model.validation_step(batch, batch_idx)
            
            self.model.on_validation_end()

    def __del__(self):
        """Best-effort cleanup: kill the TensorBoard subprocess (rank 0) and
        tear down the distributed process group."""
        if self.local_rank == 0:
            if self.config.log_type == 'tensorboard' and hasattr(self, 'tensorboard_display'):
                self.tensorboard_display.kill()
            
        if self.distributed:
            # NOTE(review): device_count() has no side effects — this call
            # looks like a leftover; confirm before removing.
            torch.cuda.device_count()
            torch.distributed.destroy_process_group()