#!/usr/bin/env python3
# Author: Armit
# Create Time: 2024/04/24

import sys
import json
from pathlib import Path
from datetime import datetime
from argparse import ArgumentParser
from typing import List

import torch
import torch.nn.functional as F
from torch.optim import SGD, Adam
from torch.utils.data import TensorDataset, DataLoader
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms.functional as TF

from scene.dataset_readers import sceneLoadTypeCallbacks, SceneInfo, CameraInfo
from scene.extractor_model import ExtracorModel, PROJECTOR_CLASSES, param_cnt
from utils.system_utils import searchForMaxIteration

# One-time CUDA/cuDNN runtime configuration (module import time).
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True     # auto-tune conv algorithms for fixed input shapes
torch.backends.cudnn.allow_tf32 = True    # allow TF32 matmuls on Ampere+ for speed

# compute device for model/loss; data may live elsewhere (see --data_device)
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def _cos_loss(x: Tensor, y: Tensor) -> Tensor:
  # 1 - cossim ∈ [0, 2]
  return 1 - F.cosine_similarity(x, y, dim=-1).mean()

def _abscos_loss(x: Tensor, y: Tensor) -> Tensor:
  # 1 - |cossim| ∈ [0, 1]; sign-invariant variant
  return 1 - F.cosine_similarity(x, y, dim=-1).abs().mean()

# name -> loss callable, selected via --loss
LOSS_FN = {
  'l1':     F.l1_loss,
  'l2':     F.mse_loss,
  'huber':  F.huber_loss,
  'cos':    _cos_loss,
  'abscos': _abscos_loss,
}

def get_pretrained_embedding(args) -> Tensor:
  '''Load the frozen target embedding matrix from a pretrained checkpoint.

  Uses args.load when given; otherwise auto-detects the newest
  iteration_*/checkpoints.pth under args.model_path / 'point_cloud'
  (mocking what Scene() does on load).
  Raises KeyError if args.key is absent from the checkpoint, and
  FileNotFoundError if the auto-detected ckpt file is missing.
  '''
  # find ckpt
  ckpt_fp = args.load
  if ckpt_fp is None:
    loaded_iter = searchForMaxIteration(args.model_path / 'point_cloud')
    print(f'>> loading trained ckpt at iteration {loaded_iter}')
    # model_path is already a Path (argparse type=Path), no re-wrap needed
    ckpt_fp = args.model_path / 'point_cloud' / f'iteration_{loaded_iter}' / 'checkpoints.pth'
    if not ckpt_fp.exists():
      raise FileNotFoundError(f'ckpt file not found at {ckpt_fp}')
  ckpt = torch.load(ckpt_fp, map_location='cpu')

  # find embed key
  key = args.key
  if key not in ckpt: raise KeyError(f'>> key {key} not found in ckpt')
  # assumes ckpt[key] is a state_dict-like mapping whose first value is the embedding table
  embed = next(iter(ckpt[key].values()))
  if not isinstance(embed, Tensor):
    raise TypeError(f'embed should be a Tensor, but got {type(embed)}')
  print('>> embed.shape:', embed.shape)
  # detach().clone() already yields a grad-free standalone copy
  return embed.detach().clone()

def get_camera_images(args) -> Tensor:
  '''Load the scene's training-camera images as one stacked float tensor (N, C, H, W).'''
  # detect scene layout on disk and dispatch to the matching loader
  src = args.source_path
  scene_info: SceneInfo = None
  if Path(src / 'dense' / 'sparse').exists():
    scene_info = sceneLoadTypeCallbacks['ColmapExt'](src, args.images, args.eval)
  elif Path(src / 'sparse').exists():
    scene_info = sceneLoadTypeCallbacks['Colmap'](src, args.images, args.eval)
  elif Path(src / 'transforms_train.json').exists():
    print('Found transforms_train.json file, assuming Blender data set!')
    scene_info = sceneLoadTypeCallbacks['Blender'](src, args.white_background, args.eval)
  else:
    raise NotImplementedError('Could not recognize scene type!')
  # convert each training camera's image to a CHW tensor and batch along dim 0
  cam_infos: List[CameraInfo] = scene_info.train_cameras  # + scene_info.test_cameras
  images = [TF.to_tensor(ci.image) for ci in cam_infos]
  return torch.stack(images, dim=0)


def train(args):
  '''Fit an ExtracorModel to regress pretrained embeddings from camera images.

  Dumps hparams to <model_path>/embed_extractor.json, logs loss to tensorboard,
  and saves model/optimizer state to <model_path>/embed_extractor.pth.
  KeyboardInterrupt is caught so a partial run is still saved.
  '''
  ''' Hparams '''
  exp_name = 'embed_extractor'
  model_path = Path(args.model_path)
  with open(model_path / f'{exp_name}.json', 'w', encoding='utf-8') as fh:
    hp_dump = {
      'cmd': ' '.join(sys.argv),
      'args': vars(args),
      'dt': str(datetime.now()),
    }
    _cvt = lambda v: str(v) if isinstance(v, Path) else v   # Path objects are not JSON-serializable
    json.dump(hp_dump, fh, indent=2, ensure_ascii=False, default=_cvt)
  sw = SummaryWriter(model_path, comment=exp_name)

  ''' Data '''
  Y = get_pretrained_embedding(args)   # target embeddings, frozen; shape (N, D)
  X = get_camera_images(args)          # camera images; shape (N, C, H, W)
  assert len(X) == len(Y), f'length mismatch: len(image) {len(X)} != len(embed) {len(Y)}'
  if args.data_device == 'cuda':
    print('>> preload all data to GPU')
    X = X.to(args.data_device)
    Y = Y.to(args.data_device)

  dataset = TensorDataset(X, Y)
  # pin_memory only helps (and is only valid) for CPU-resident tensors
  trainloader = DataLoader(dataset, args.batch_size, shuffle=True, pin_memory=args.data_device=='cpu', drop_last=True)
  print(f'>> data: n_sample = {len(dataset)}, n_batch = {len(trainloader)}')

  ''' Model & Optim '''
  model = ExtracorModel(args.backbone, args.projector, args.pool_size, Y.shape[-1])
  try:
    model = torch.compile(model)
    print('>> compile model for speeding up! :)')
  except Exception as e:
    # torch.compile is best-effort; fall back to eager mode but say why
    print(f'>> torch.compile failed, running eager: {e}')
  model: ExtracorModel = model.to(device)
  print(model)
  print('>> param_cnt:', param_cnt(model))
  param_groups = [
    {'params': model.backbone .parameters(), 'lr': args.learning_rate[0]},
    {'params': model.projector.parameters(), 'lr': args.learning_rate[1]},
  ]
  # lr <= 0 means "freeze this part": drop the group entirely
  param_groups = [it for it in param_groups if it['lr'] > 0]
  assert param_groups, 'all learning rates <= 0, nothing to train'
  optim = SGD(param_groups, momentum=args.momentum)
  loss_fn = LOSS_FN[args.loss]

  ''' Train '''
  step = 0
  epoch = 0
  model.train()
  try:
    for epoch in range(1, args.epochs+1):
      for x, y in trainloader:   # lowercase: do not shadow the full X/Y tensors
        x, y = x.to(device), y.to(device)

        optim.zero_grad()
        out = model(x)
        loss = loss_fn(out, y)
        loss.backward()
        optim.step()

        step += 1

        if step % 10 == 0:
          print(f'[step {step}] loss: {loss.item()}')
          sw.add_scalar('train/loss', loss.item(), global_step=step)
  except KeyboardInterrupt:
    print('>> Exit by Ctrl+C')
  finally:
    sw.close()   # flush tensorboard events even on interrupt

  ''' Save '''
  ckpt = {
    'step': step,
    'epoch': epoch,
    'model': model.state_dict(),
    'optim': optim.state_dict(),
  }
  torch.save(ckpt, model_path / f'{exp_name}.pth')


if __name__ == '__main__':
  parser = ArgumentParser()
  # ↓↓↓ mock original args
  parser.add_argument('-s', '--source_path', required=True, type=Path, help='dataset path')
  parser.add_argument('-m', '--model_path', required=True, type=Path, help='pretrained log path for *.ckpt file')
  parser.add_argument('--images', default='images', help='dataset subfolder path')
  parser.add_argument('--white_background', action='store_true')
  parser.add_argument('--eval', action='store_true', help='split dataset')
  # ↓↓↓ new args
  parser.add_argument('-K', '--key', required=True, help='specify embedding key name in *.ckpt')
  parser.add_argument('-L', '--load', type=Path, help='specify *.ckpt file to load, defaults to auto detect')
  parser.add_argument('-B', '--backbone', default='vgg16', help='torchvison model_gen() names, e.g.: vgg16, resnet18')
  parser.add_argument('-P', '--projector', default='MLP', choices=PROJECTOR_CLASSES.keys(), help='prefix of your Projector class')
  parser.add_argument('--pool_size', default=[3], type=int, nargs='+', help='adaptive avgpool size')
  parser.add_argument('--epochs', default=100, type=int)
  parser.add_argument('--batch_size', default=4, type=int)
  parser.add_argument('-lr', '--learning_rate', default=[2e-6, 2e-4], type=float, nargs='+', help='learning rate for backbone and projector')
  parser.add_argument('--momentum', default=0.8, type=float, help='SGD momentum')
  parser.add_argument('--loss', default='l2', choices=LOSS_FN.keys())
  parser.add_argument('--data_device', default='cpu', choices=['cpu', 'cuda'], help='preload all data to data_device, set to cuda if you have a large VRAM')
  parser.add_argument('--matmul_prec', default='high', choices=['highest', 'high', 'medium'], help='float32_matmul_precision')
  args = parser.parse_args()

  # tune this w.r.t your hardware :)
  torch.set_float32_matmul_precision(args.matmul_prec)

  # sanity check: nargs='+' yields lists; normalize to the shapes train() expects
  if len(args.pool_size) == 1:
    args.pool_size = args.pool_size[0]
  else:
    assert len(args.pool_size) == 2, 'pool_size should be one or two int, e.g.: 4 or 3 4'

  if len(args.learning_rate) == 1:
    # duplicate the single value for both backbone and projector
    # (BUGFIX: was `[args.learning_rate] * 2`, which nested the list into [[lr], [lr]])
    args.learning_rate = args.learning_rate * 2
  else:
    assert len(args.learning_rate) == 2, 'learning_rate should be one or two float'

  train(args)
