import os

import fsspec
import hydra
import lightning as L
import omegaconf
import rich.syntax
import rich.tree
import torch

import dataloader
import diffusion
import utils
import coco_dataloader
from lightning.pytorch.loggers import WandbLogger
# Custom OmegaConf resolvers usable inside Hydra config files, e.g.
# ${cwd:}, ${device_count:}, ${eval:'2 ** 10'}, ${div_up:10,3}.
omegaconf.OmegaConf.register_new_resolver(
  'cwd', os.getcwd)
omegaconf.OmegaConf.register_new_resolver(
  'device_count', torch.cuda.device_count)
# NOTE(review): the 'eval' resolver executes arbitrary Python from config
# values — safe only with fully trusted config files.
omegaconf.OmegaConf.register_new_resolver(
  'eval', eval)
omegaconf.OmegaConf.register_new_resolver(
  'div_up', lambda x, y: (x + y - 1) // y)  # ceiling division


def _load_from_checkpoint(config, tokenizer=None):
  """Build a Diffusion model, fresh for HF backbones or from a checkpoint.

  Args:
    config: Composed Hydra config; reads `backbone` and
      `eval.checkpoint_path`.
    tokenizer: Optional tokenizer forwarded to the model constructor.

  Returns:
    A `diffusion.Diffusion` instance (moved to CUDA for HF backbones).
  """
  # Bug fix: the original hard-coded `tokenizer=None` on both paths,
  # silently discarding the `tokenizer` argument; forward it instead.
  if 'hf' in config.backbone:
    # Hugging Face backbones carry their own weights; nothing to restore.
    return diffusion.Diffusion(
      config, tokenizer=tokenizer).to('cuda')

  return diffusion.Diffusion.load_from_checkpoint(
    config.eval.checkpoint_path,
    tokenizer=tokenizer,
    config=config)


@L.pytorch.utilities.rank_zero_only
def _print_config(
  config: omegaconf.DictConfig,
  resolve: bool = True,
  save_cfg: bool = True) -> None:
  """Render the Hydra config as a Rich tree (on rank zero only).

  Args:
    config (DictConfig): Configuration composed by Hydra.
    resolve (bool): Whether to resolve reference fields of DictConfig.
    save_cfg (bool): Whether to save the configuration tree to a file.
  """
  style = 'dim'
  tree = rich.tree.Tree('CONFIG', style=style, guide_style=style)

  for key in config.keys():
    branch = tree.add(key, style=style, guide_style=style)
    section = config.get(key)
    # Nested configs are rendered as YAML; leaves as plain strings.
    if isinstance(section, omegaconf.DictConfig):
      rendered = omegaconf.OmegaConf.to_yaml(section, resolve=resolve)
    else:
      rendered = str(section)
    branch.add(rich.syntax.Syntax(rendered, 'yaml'))

  rich.print(tree)
  if save_cfg:
    out_path = '{}/config_tree.txt'.format(
      config.checkpointing.save_dir)
    with fsspec.open(out_path, 'w') as fp:
      rich.print(tree, file=fp)


@L.pytorch.utilities.rank_zero_only
def _print_batch(train_ds, valid_ds, tokenizer=None):
  """Print shape/range diagnostics for one batch of each dataloader.

  Tailored to the COCO colorization batch format: reports shapes and
  value ranges of the grayscale/original image tensors, plus any
  optional conditioning tensors present in the batch.
  """
  for dl_type, dl in [
    ('train', train_ds), ('valid', valid_ds)]:
    print(f'Printing {dl_type} dataloader batch.')
    batch = next(iter(dl))

    gray = batch['grayscale']
    orig = batch['original']
    print('Batch contains:')
    print('- grayscale shape:', gray.shape)
    print('- original shape:', orig.shape)
    print('- grayscale value range:', gray.min().item(), 'to', gray.max().item())
    print('- original value range:', orig.min().item(), 'to', orig.max().item())

    # Conditioning tensors are optional; report them only when present.
    if 'condition_mask' in batch:
      mask = batch['condition_mask']
      print('- condition_mask shape:', mask.shape)
      print('- condition_mask value range:', mask.min().item(), 'to', mask.max().item())
      print('- condition_mask coverage:', mask.sum().item() / mask.numel())

    if 'color_condition' in batch:
      color = batch['color_condition']
      print('- color_condition shape:', color.shape)
      print('- color_condition value range:', color.min().item(), 'to', color.max().item())


def generate_samples(config, logger, tokenizer):
  """Sample from a checkpointed model; report generative perplexity.

  Args:
    config: Composed Hydra config; reads `sampling.*` and `eval.*`.
    logger: Logger used for status messages.
    tokenizer: Optional tokenizer forwarded to the checkpoint loader.

  Returns:
    The text samples produced by the final sampled batch.
  """
  logger.info('Generating samples.')
  net = _load_from_checkpoint(config=config, tokenizer=tokenizer)
  net.gen_ppl_metric.reset()
  if config.eval.disable_ema:
    logger.info('Disabling EMA.')
    net.ema = None
  stride_length = config.sampling.stride_length
  num_strides = config.sampling.num_strides
  for _ in range(config.sampling.num_sample_batches):
    if config.sampling.semi_ar:
      _, intermediates, _ = net.restore_model_and_semi_ar_sample(
        stride_length=stride_length,
        num_strides=num_strides,
        dt=1 / config.sampling.steps)
      # Semi-AR samples contain numerous <|endoftext|> tokens, and
      # diffusion.compute_generative_perplexity() discards any text after
      # the first EOS token, so these samples would need post-processing
      # before a perplexity computation — hence it is skipped here.
      text_samples = intermediates[-1]
    else:
      token_ids = net.restore_model_and_sample(
        num_steps=config.sampling.steps)
      text_samples = net.tokenizer.batch_decode(token_ids)
      net.compute_generative_perplexity(text_samples)
  print('Text samples:', text_samples)
  if not config.sampling.semi_ar:
    print('Generative perplexity:',
          net.gen_ppl_metric.compute())
  return text_samples

def _ppl_eval(config, logger, tokenizer):
  """Run validation-set evaluation on a checkpointed model.

  Args:
    config: Composed Hydra config; reads `eval.*`, `callbacks`,
      `trainer`, `strategy`, and `data.*`.
    logger: Logger used for status messages.
    tokenizer: Optional tokenizer forwarded to the checkpoint loader.
  """
  logger.info('Starting Zero Shot Eval.')

  model = _load_from_checkpoint(config=config,
                                tokenizer=tokenizer)
  if config.eval.disable_ema:
    logger.info('Disabling EMA.')
    model.ema = None

  # Lightning callbacks.
  callbacks = []
  if 'callbacks' in config:
    for _, callback in config.callbacks.items():
      callbacks.append(hydra.utils.instantiate(callback))
  trainer = hydra.utils.instantiate(
    config.trainer,
    default_root_dir=os.getcwd(),
    callbacks=callbacks,
    strategy=hydra.utils.instantiate(config.strategy),
    logger=None)  # no experiment logger during eval
  # NOTE(review): _train reads loader settings from `config.loader`, but
  # this path reads `config.data.train` — confirm both config paths exist.
  _, valid_ds = coco_dataloader.get_dataloaders(
    batch_size=config.data.train.batch_size,
    num_workers=config.data.train.num_workers,
    use_slic=getattr(config.data, 'use_slic', True),
    slic_ratio=getattr(config.data, 'slic_ratio', 0.1))
  trainer.validate(model, valid_ds)

def _cfg_to_dict(cfg):
  """Resolve an OmegaConf config into a plain, serializable container."""
  return omegaconf.OmegaConf.to_container(cfg, resolve=True)

def _train(config, logger, tokenizer):
  """Train the diffusion model on the COCO colorization dataloaders.

  Args:
    config: Composed Hydra config; reads `checkpointing.*`, `callbacks`,
      `loader.*`, `wandb.*`, `trainer`, and `strategy`.
    logger: Logger used for status messages.
    tokenizer: Unused here; the model is constructed with
      `tokenizer=None`.
  """
  logger.info('Starting Training.')

  # Resume only when a resume path is configured and actually exists.
  if (config.checkpointing.resume_from_ckpt
      and config.checkpointing.resume_ckpt_path is not None
      and utils.fsspec_exists(
        config.checkpointing.resume_ckpt_path)):
    ckpt_path = config.checkpointing.resume_ckpt_path
  else:
    ckpt_path = None

  # Lightning callbacks.
  callbacks = []
  if 'callbacks' in config:
    for _, callback in config.callbacks.items():
      callbacks.append(hydra.utils.instantiate(callback))

  # COCO dataloaders with optional SLIC superpixel conditioning.
  train_ds, valid_ds = coco_dataloader.get_dataloaders(
    batch_size=config.loader.batch_size,
    num_workers=config.loader.num_workers,
    use_slic=getattr(config.loader, 'use_slic', True),
    slic_ratio=getattr(config.loader, 'slic_ratio', 0.1))

  model = diffusion.Diffusion(config, tokenizer=None)

  wandb_logger = WandbLogger(
    project=config.wandb.project,
    notes=config.wandb.notes,
    group=config.wandb.group,
    job_type=config.wandb.job_type,
    name=config.wandb.name,
    id=config.wandb.id,
    tags=config.wandb.tags)
  # Record hyperparameters via the Lightning logger API; hyperparameter
  # logging must not be able to abort training, so failures only warn.
  try:
    wandb_logger.log_hyperparams(_cfg_to_dict(config))
  except Exception as e:
    logger.warning(f"wandb_logger.log_hyperparams failed: {e}")

  trainer = hydra.utils.instantiate(
    config.trainer,
    default_root_dir=os.getcwd(),
    callbacks=callbacks,
    strategy=hydra.utils.instantiate(config.strategy),
    logger=wandb_logger)
  trainer.fit(model, train_ds, valid_ds, ckpt_path=ckpt_path)


@hydra.main(version_base=None, config_path='configs',
            config_name='config')
def main(config):
  """Entry point: dispatch to training, sampling, or perplexity eval."""
  L.seed_everything(config.seed)
  _print_config(config, resolve=True, save_cfg=True)

  logger = utils.get_logger(__name__)
  tokenizer = None

  mode = config.mode
  if mode == 'sample_eval':
    generate_samples(config, logger, tokenizer)
  elif mode == 'ppl_eval':
    _ppl_eval(config, logger, tokenizer)
  else:
    # Any other mode falls through to training.
    _train(config, logger, tokenizer)


# Hydra parses CLI overrides and invokes main() with the composed config.
if __name__ == '__main__':
  main()