import os
import math
import shutil
import cv2
import json
import argparse
import tempfile
import numpy as np
import pandas as pd
from functools import partial
from pathlib import Path
from collections import deque
from msi_utils import patch_render, L_sample, get_tvc_loss, measurement, get_PCA_direction
from my_network import MsiModel
from nex_network import NexMsi
from video_path import generate_sphere_cams, generate_spiral_cams2
from vcpy.sfmdata import load_sfm_data
from vcpy.linearfitting import sphere_fit, fit_circle3
from lfcapture.data_proc_pipe.registration_pipeline import transform_sfm_file

import torch as pt
from torch import optim
from torch import distributed as dist
from torch import multiprocessing as mp
from torch.utils.data import Dataset, DataLoader, random_split
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision.utils import make_grid

# Select the NeX-style MSI network (NexMsi) instead of the plain MsiModel.
USE_NEX = True
# Half-ranges, in degrees, of the MSI's angular extent; they size the MSI
# resolution and are converted to radians when the model is constructed.
THETA_HALF_DEGREE_RANGE = 40
PHI_HALF_DEGREE_RANGE = 30

def load_normalized_image(filepath):
  """Load an image from disk as a float32 RGB tensor with values in [0, 1].

  Args:
    filepath: path to an image file readable by OpenCV.

  Returns:
    A torch tensor of shape (H, W, 3) in RGB channel order.

  Raises:
    FileNotFoundError: if the file cannot be read. cv2.imread returns None
      instead of raising, which would otherwise surface later as a confusing
      cvtColor error.
  """
  img = cv2.imread(filepath)
  if img is None:
    raise FileNotFoundError(f'cannot read image: {filepath}')
  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
  img = img.astype(np.float32) / 255
  return pt.from_numpy(img)

class MsiDataSet(Dataset):
  """Dataset of per-view (pixel rays, rotation, translation) plus the view image.

  Images are loaded lazily from disk and held in a bounded FIFO cache so
  repeated epochs do not re-read every file.
  """

  def __init__(self, pixel_rays_targs, Rs, Ts, im_filepaths):
    # One entry per view: (pixel rays, rotation matrix, translation vector).
    self.data_list = [(pixel_rays_targs[i], Rs[i], Ts[i])
      for i in range(len(pixel_rays_targs))]
    self.im_filepaths = im_filepaths

    # FIFO image cache: filepath -> image tensor.
    self.cache = {}
    self.cache_queue = deque()
    self.cache_size = 100

  def fromCache(self, index):
    """Return (image, index) for the view, reading from disk on a cache miss.

    Only the image is cached (keyed by filepath); the index is attached
    fresh on every call so that two dataset entries sharing the same file
    never receive a stale index cached by an earlier caller.
    """
    filepath = self.im_filepaths[index]

    if filepath in self.cache:
      return (self.cache[filepath], index)

    # Evict the oldest entry before inserting once the cache is full.
    if len(self.cache) >= self.cache_size:
      evicted = self.cache_queue.popleft()
      del self.cache[evicted]

    img = load_normalized_image(filepath)
    self.cache_queue.append(filepath)
    self.cache[filepath] = img
    return (img, index)

  def __getitem__(self, index):
    return (self.data_list[index],
      self.fromCache(index))

  def __len__(self):
    return len(self.data_list)
class WrappedDataLoader:
  """Wrap an iterable of batches so every batch is passed through ``func``."""

  def __init__(self, dl, func):
    self.dl = dl
    self.func = func

  def __len__(self):
    return len(self.dl)

  def __iter__(self):
    # Apply the transform lazily, one batch at a time.
    for batch in self.dl:
      yield self.func(*batch)
def preprocess(gpu_id, x, y):
  """Move every tensor in the (x, y) batch tuples onto GPU ``gpu_id``."""
  moved_x = tuple(t.cuda(gpu_id) for t in x)
  moved_y = tuple(t.cuda(gpu_id) for t in y)
  return (moved_x, moved_y)

class VideoDataSet(Dataset):
  """Camera-path dataset: each item is (rotation, translation, frame index)."""

  def __init__(self, cams_json):
    self.cams_json = cams_json

  def __getitem__(self, index):
    cam = self.cams_json[index]
    return (cam['rot'], cam['t'], index)

  def __len__(self):
    return len(self.cams_json)

# train
def loss_func(input, model, target, args):
  """Total training loss: MSE + weighted gradient L1 + weighted TV regularizer.

  ``input[0]`` and ``target`` hold pixel triplets along the ray axis
  arranged as (center, +x neighbor, +y neighbor) — see the sampling in
  loss_batch.
  """
  pred = input[0]
  mse = pt.mean(pt.square(pred - target))

  # Finite-difference gradients within each (center, +x, +y) triplet.
  pred_dx = pred[:, 1::3, :] - pred[:, 0::3, :]
  pred_dy = pred[:, 2::3, :] - pred[:, 0::3, :]
  targ_dx = target[:, 1::3, :] - target[:, 0::3, :]
  targ_dy = target[:, 2::3, :] - target[:, 0::3, :]
  grad_l1 = pt.mean(pt.abs(pred_dx - targ_dx)) + pt.mean(pt.abs(pred_dy - targ_dy))

  # Total-variation regularizer on the MSI color planes.
  msi_color = model.msi_rgb if USE_NEX else model.msi_rgba[..., :3]
  tv = args.tvc * get_tvc_loss(msi_color)

  return mse + args.gradloss * grad_l1 + tv

def loss_batch(model, xb, yb, args, opt=None):
  """Compute the loss on a sampled subset of rays; optionally take an optimizer step.

  The original contained a dead ``if False:`` chunked-sampling branch that
  referenced an undefined ``valid_num`` (it would have raised NameError if
  ever enabled); the unreachable branch has been removed.

  Args:
    model: DDP-wrapped MSI model; ``model.module`` is handed to the loss for
      access to the raw MSI planes.
    xb: (pixel rays, rotations, translations) batch.
    yb: (target images, dataset indices) batch; images are (..., h, w, C).
    args: parsed CLI args (uses sample_ray_N, gradloss, tvc).
    opt: optimizer; when given, backward() and step() are performed.

  Returns:
    (scalar loss value, batch size).
  """
  h = yb[0].shape[-3]
  w = yb[0].shape[-2]
  # Sample pixel triplets (center, +x, +y) so loss_func has neighbors for
  # its gradient term.
  samples = L_sample([h, w], args.sample_ray_N)
  sub_coords = xb[0][..., samples, :]
  sub_target = yb[0].view(list(yb[0].shape[:-3]) +
    [h * w, yb[0].shape[-1]])[..., samples, :]

  loss = loss_func(model(sub_coords, xb[1], xb[2], use_offset=True),
    model.module, sub_target, args)
  if opt is not None:
    loss.backward()
  pt.cuda.empty_cache()

  if opt is not None:
    opt.step()
    opt.zero_grad(set_to_none=True)
  pt.cuda.empty_cache()

  return loss.item(), len(xb[0])

def visualize_result(
  val_dl, model, curr_epoch, output_folder, args, im_filenames):
  """Render every validation view, save the images, and write a metrics CSV.

  Args:
    val_dl: validation loader yielding (xb, yb) batches, where
      yb = (target images, dataset indices).
    model: DDP-wrapped MSI model.
    curr_epoch: epoch number used to prefix output filenames.
    output_folder: Path the rendered images and ssim_psnr.csv are written to.
    args: parsed CLI args (uses sample_ray_N).
    im_filenames: original image filenames, indexed by dataset index.
  """
  filenames = []
  psnrs = []
  ssims = []
  lpips = []
  for xb, yb in val_dl:
    h = yb[0].shape[-3]
    w = yb[0].shape[-2]
    im_result = patch_render(model, xb[0], xb[1], xb[2],
      args.sample_ray_N * 2).cpu().detach().numpy()
    im_result = im_result.reshape(
      [-1, h, w, im_result.shape[-1]])
    for j, (im, gt) in enumerate(zip(im_result, yb[0])):
      psnr, ssim, lpip = measurement(im, gt)
      # Index yb[1] with the position inside the batch: the original used
      # the whole batch tensor, which only happened to work for
      # batch_size == 1 and reused one filename otherwise.
      filenames.append(f'{curr_epoch:04d}_{im_filenames[int(yb[1][j])]}')
      psnrs.append(psnr)
      ssims.append(ssim)
      lpips.append(lpip)
      im = (im * 255 + 0.5).astype(np.uint8)
      im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
      cv2.imwrite(str(output_folder / filenames[-1]), im)

  # Append the per-metric averages as a final 'average' row.
  filenames.append('average')
  psnrs.append(np.mean(np.array(psnrs)))
  ssims.append(np.mean(np.array(ssims)))
  lpips.append(np.mean(np.array(lpips)))
  df = pd.DataFrame.from_dict({
    'name': filenames,
    'PSNR': psnrs,
    'SSIM': ssims,
    'LPIPS': lpips
  })
  df.to_csv(output_folder / 'ssim_psnr.csv', index=False)

def fit(args, model, opt, scheduler,
  train_dl, output_folder, im_filenames, val_dl=None, rank=0):
  """Training loop with checkpoint resume, early termination, and TensorBoard logging.

  Only rank 0 validates, logs, checkpoints, and visualizes. When
  ``args.predict`` is set, no training happens: the last checkpoint is
  rendered on the validation set instead.
  """
  start_epoch = 0
  ckpt_file = output_folder / 'model.ckpt'
  if ckpt_file.exists():
    loaded_epoch = load_checkpoint(ckpt_file, model, opt, scheduler)
    start_epoch = loaded_epoch + 1
    pt.cuda.empty_cache()

  if args.predict:
    # NOTE(review): if no checkpoint exists, ``loaded_epoch`` is unbound and
    # the visualize call below raises NameError — predict mode assumes a
    # prior training run produced model.ckpt.
    if val_dl is not None and rank == 0:
      model.eval()
      with pt.no_grad():
        visualize_result(val_dl, model,
          loaded_epoch, output_folder, args, im_filenames)
    return

  if rank == 0:
    writer = SummaryWriter(output_folder)
  early_terminate_count = 0
  early_terminate = False
  prev_loss = 1e10
  for epoch in range(start_epoch, args.epochs+1):
    model.train()
    for i, (xb, yb) in enumerate(train_dl):
      loss, _ = loss_batch(model, xb, yb, args, opt)
      print(f'{epoch}, rank{rank}, {i}/{len(train_dl)}, {loss}, {scheduler.get_last_lr()[-1]}', end='\r')
    scheduler.step()

    # Checkpoint every 10% of the total epochs, or on early termination.
    # NOTE(review): args.epochs < 10 makes ``args.epochs // 10`` zero and
    # raises ZeroDivisionError here — confirm the expected minimum epochs.
    if (epoch != 0 and epoch % (args.epochs // 10) == 0 or early_terminate) and rank == 0:
      save_checkpoint(ckpt_file, model, opt, scheduler, epoch)

    if val_dl is not None and rank == 0:
      model.eval()
      with pt.no_grad():
        # Batch-size-weighted mean of the validation losses.
        losses, batch_nums = zip(
          *[loss_batch(model, xb, yb, args) for xb, yb in val_dl])
        loss = np.sum(np.multiply(losses, batch_nums)) / np.sum(batch_nums)
        print(f'val: {epoch}, {loss}')
        writer.add_scalar('training loss', loss, epoch)

        # Terminate once the relative validation-loss change stays below
        # 0.2% for more than 5 consecutive epochs.
        if abs(prev_loss - loss) / loss < 2e-3:
          print(f'early_terminate_count: {early_terminate_count}')
          early_terminate_count += 1
          if early_terminate_count > 5:
            early_terminate = True
        else:
          early_terminate_count = 0
        prev_loss = loss

        if (epoch != 0 and epoch % (args.epochs // 10) == 0 or early_terminate):
          visualize_result(val_dl, model,
            epoch, output_folder, args, im_filenames)

        if (epoch != 0 and epoch % (args.epochs // 10) == 0 or early_terminate):
          # Log the MSI color planes as a vertical image grid.
          msi_color = model.module.msi_rgb if USE_NEX else model.module.msi_rgba[...,:3]
          msi_color_result = pt.sigmoid(msi_color).reshape(
            [-1] + list(msi_color.shape[-3:])).permute(0, 3, 1, 2)
          writer.add_image(f'msi layers, epoch: {epoch}',
            make_grid(msi_color_result, nrow=1))

    if early_terminate:
      break
  if rank == 0:
    writer.flush()
    writer.close()

def normalize_sfm_data(sfm_file_in, sfm_file_out, fit_type):
  """Re-center/re-orient an SfM reconstruction and estimate scene depth bounds.

  Fits the camera positions with a sphere, a circle, or a generic blob
  (depending on ``fit_type``), builds a rigid transform whose origin is the
  fit center and whose axes follow the camera layout, applies it to the SfM
  file, and derives near/far radial depths from the structure points.

  Args:
    sfm_file_in: path of the input sfm_data json.
    sfm_file_out: path the transformed sfm_data json is written to.
    fit_type: 'sphere', 'circle', or anything else for the generic fit.

  Returns:
    (dmin, dmax): near and far radial depth bounds for the MSI.
  """
  sfm_data = load_sfm_data(sfm_file_in)
  # Camera centers in world coordinates, one row per view.
  cam_pos_vec = np.array(
    [view.pose.camera_frame[:3,3] for view in sfm_data.views.values()])

  if fit_type == 'sphere':
    # not support 360-theta sphere
    _, cam_x, cam_y, cam_z = sphere_fit(cam_pos_vec)
    cam_center = np.r_[cam_x, cam_y, cam_z]
    cam_r = max(np.linalg.norm(cam_pos_vec - cam_center, axis=1))

    # Mean camera viewing (z) direction becomes the new Z axis.
    zs = np.array(
      [view.pose.camera_frame[:3,2] for view in sfm_data.views.values()])
    Z_axis = np.mean(zs, 0)

    # Choose the PCA direction with the smaller projection onto Z as X.
    eig_vectors = get_PCA_direction(cam_pos_vec - cam_center)
    if eig_vectors[:,0].dot(Z_axis) > eig_vectors[:,1].dot(Z_axis):
      X_axis = eig_vectors[:,1]
    else:
      X_axis = eig_vectors[:,0]
    Y_axis = np.cross(Z_axis, X_axis)
    Z_axis = np.cross(X_axis, Y_axis)  # re-orthogonalize Z
  elif fit_type == 'circle':
    # only support 360-theta circle
    cam_center, Y_axis, _ = fit_circle3(cam_pos_vec)
    cam_r = max(np.linalg.norm(cam_pos_vec - cam_center, axis=1))

    eig_vectors = get_PCA_direction(cam_pos_vec - cam_center)
    X_axis = eig_vectors[:,0]
    Z_axis = np.cross(X_axis, Y_axis)
    X_axis = np.cross(Y_axis, Z_axis)  # re-orthogonalize X
  else:
    # Generic layout: centroid of cameras; radius from the widest pair.
    cam_center = np.mean(cam_pos_vec, axis=0)

    pair_wise_dist = np.linalg.norm(
      cam_pos_vec[None,...] - cam_pos_vec[:,None,...], axis=-1)
    cam_r = np.max(pair_wise_dist) / 2

    zs = np.array(
      [view.pose.camera_frame[:3,2] for view in sfm_data.views.values()])
    Z_axis = np.mean(zs, 0)

    eig_vectors = get_PCA_direction(cam_pos_vec - cam_center)
    X_axis = eig_vectors[:,0]
    Y_axis = np.cross(Z_axis, X_axis)
    X_axis = np.cross(Y_axis, Z_axis)
  X_axis /= np.linalg.norm(X_axis)
  Y_axis /= np.linalg.norm(Y_axis)
  Z_axis /= np.linalg.norm(Z_axis)

  # Normalized-to-world frame (columns = new axes, origin = fit center),
  # inverted to map world geometry into the normalized frame.
  mat = np.eye(4)
  mat[:3, 0] = X_axis
  mat[:3, 1] = Y_axis
  mat[:3, 2] = Z_axis
  mat[:3,3] = cam_center
  mat = np.linalg.inv(mat)

  # sphere at dmin must be seen by at least two cameras
  # here we set an approximate value
  pair_wise_dist = np.linalg.norm(
    cam_pos_vec[None,...] - cam_pos_vec[:,None,...], axis=-1)
  min_dmin = cam_r + np.max(np.sort(pair_wise_dist)[:,3]) * 2

  # Depth bounds from the 0.1% / 99.9% radial percentiles of the points.
  # NOTE(review): ``point.X + mat[:3,3]`` applies only the translation of
  # the inverted matrix, not its rotation — rotation preserves the norm, so
  # this is likely intentional, but confirm against transform_sfm_file.
  radius_depths = []
  for point in sfm_data.structure.values():
    radius_depths.append(np.linalg.norm(point.X + mat[:3,3]))
  radius_depths = np.sort(np.array(radius_depths))
  dmin = max(radius_depths[int(len(radius_depths) * 0.001)], min_dmin)
  dmax = radius_depths[int(len(radius_depths) * 0.999)]

  transform_sfm_file(sfm_file_in, mat, sfm_file_out, False)
  return dmin, dmax

def load_normalized_data(sfm_file, image_folder, fit_type):
  """Normalize the SfM reconstruction and extract per-view training inputs.

  Args:
    sfm_file: path to the original sfm_data json.
    image_folder: Path of the folder containing the source images.
    fit_type: camera-layout fit passed through to normalize_sfm_data.

  Returns:
    (im_filepaths, im_filenames, rot_targs, t_targs, pixel_rays_targs,
     h, w, K, dmin, dmax) — pixel_rays_targs holds one (h*w, 3) ray tensor
    per view (the same tensor for all views, since one K is used).
  """
  # normalize sfm data
  with tempfile.TemporaryDirectory() as temp_dir:
    sfm_data_n_file = Path(temp_dir) / 'normalized_sfm_data.json'
    dmin, dmax = normalize_sfm_data(sfm_file, sfm_data_n_file, fit_type=fit_type)
    # in right-hand opencv coordinate system
    sfm_data = load_sfm_data(sfm_data_n_file)
  im_filepaths = []
  im_filenames = []
  rot_targs = []
  t_targs = []
  pixel_rays_targs = []
  for view in sfm_data.views.values():
    im_filepaths.append(str(image_folder / view.filename))
    im_filenames.append(view.filename)
    rot_targs.append(pt.from_numpy(view.pose.camera_frame[:3,:3]).to(pt.float32))
    t_targs.append(pt.from_numpy(view.pose.camera_frame[:3,3]).to(pt.float32))
  # NOTE(review): size and intrinsics are taken from the last iterated view —
  # assumes all views share the same resolution and K; confirm upstream.
  h, w = view.height, view.width
  ys, xs = pt.meshgrid(pt.arange(h), pt.arange(w))
  ones = pt.ones_like(ys)
  pixel_coords = pt.stack([xs, ys, ones], 0).to(pt.float32).reshape(3, -1)
  K = view.intrinsics.K
  # Back-project homogeneous pixel coordinates into camera-space rays.
  K_inv = pt.from_numpy(K).inverse().to(pt.float32)
  pixel_rays = (K_inv @ pixel_coords).transpose(-1, -2)
  pixel_rays_targs = [pixel_rays] * len(sfm_data.views)

  return im_filepaths, im_filenames, rot_targs, t_targs, pixel_rays_targs, \
    h, w, K, dmin, dmax

def backup_code(output_folder):
  """Copy every .py file next to this script into ``output_folder``.

  Uses shutil (already imported at module level) instead of shelling out to
  ``cp`` via os.system, so paths containing spaces or shell metacharacters
  are handled safely.

  Args:
    output_folder: destination Path; created (with parents) if missing.
  """
  code_folder = Path(__file__).parent
  output_folder.mkdir(parents=True, exist_ok=True)
  for py_file in code_folder.glob('*.py'):
    shutil.copy2(py_file, output_folder / py_file.name)

def save_checkpoint(filepath, model, optimizer, scheduler, epoch):
  """Serialize model/optimizer/scheduler state plus the epoch number to disk."""
  print(f'Saving checkpoint @ Epoch {epoch}...')
  state = {
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'scheduler_state_dict': scheduler.state_dict(),
  }
  pt.save(state, filepath)

def load_checkpoint(filepath, model, optimizer=None, scheduler=None):
  """Restore model (and optionally optimizer/scheduler) state; return the saved epoch."""
  checkpoint = pt.load(filepath)
  model.load_state_dict(checkpoint['model_state_dict'])
  # Optimizer and scheduler are restored only when the caller provides them.
  for obj, key in ((optimizer, 'optimizer_state_dict'),
      (scheduler, 'scheduler_state_dict')):
    if obj is not None:
      obj.load_state_dict(checkpoint[key])
  epoch = checkpoint['epoch']
  print(f'Loading checkpoint @ Epoch {epoch}')
  return epoch

def run(rank, args, sfm_data_file, msi_folder, image_folder):
  """Per-process DDP worker: trains the MSI model, then renders a path video.

  Args:
    rank: this process's GPU/process index in [0, args.gpus).
    args: parsed CLI args.
    sfm_data_file: path to the sfm_data json.
    msi_folder: output Path for config, checkpoints, logs, and video.
    image_folder: Path of the folder with the source images.
  """
  world_size = args.gpus
  dist.init_process_group('nccl', rank=rank, world_size=world_size)

  # set the random seed so that the models are initialized with the same weights
  pt.manual_seed(0)
  pt.cuda.set_device(rank)

  # prepare data
  im_filepaths, im_filenames, rot_targs, t_targs, pixel_rays_targs, \
    h, w, K, dmin, dmax = \
    load_normalized_data(sfm_data_file, image_folder, args.cam_pos_shape)

  # prepare params
  pixels_per_degree = math.radians(K[0,0]) # decides msi's resolution, also has effect on learning rate
  msi_resolution_w = int(THETA_HALF_DEGREE_RANGE * pixels_per_degree * 2 + 0.5)
  msi_resolution_h = int(PHI_HALF_DEGREE_RANGE * pixels_per_degree * 2 + 0.5)

  # Persist the run configuration next to the outputs.
  config_file = msi_folder / 'config.json'
  config_data = {}
  config_data['dmin'] = dmin
  config_data['dmax'] = dmax
  config_data['msi_resolution_w'] = msi_resolution_w
  config_data['msi_resolution_h'] = msi_resolution_h
  config_data['theta_half_degree_range'] = THETA_HALF_DEGREE_RANGE
  config_data['phi_half_degree_range'] = PHI_HALF_DEGREE_RANGE
  config_data['lr'] = args.lr
  config_data['epochs'] = args.epochs
  config_data['scheduler_gamma'] = args.scheduler_gamma
  with open(config_file, 'w') as f:
    json.dump(config_data, f)

  # create model
  if USE_NEX:
    model = NexMsi(dmin, dmax, msi_resolution_w, msi_resolution_h,
      math.radians(THETA_HALF_DEGREE_RANGE),
      math.radians(PHI_HALF_DEGREE_RANGE),
      rank).cuda(rank)
  else:
    model = MsiModel(dmin, dmax, msi_resolution_w, msi_resolution_h,
      math.radians(THETA_HALF_DEGREE_RANGE),
      math.radians(PHI_HALF_DEGREE_RANGE),
      rank).cuda(rank)
  model = DDP(model, device_ids=[rank])

  # training
  dataset = MsiDataSet(
    pixel_rays_targs, rot_targs, t_targs, im_filepaths)
  train_size = int(len(dataset) * args.train_ratio + 0.5)
  val_size = int(len(dataset) - train_size)
  train_ds, val_ds = random_split(dataset, [train_size, val_size])
  train_sampler = DistributedSampler(
    train_ds, num_replicas=world_size, rank=rank)
  train_dl = DataLoader(train_ds, batch_size=args.batch_size,
    sampler=train_sampler, pin_memory=True)
  train_dl = WrappedDataLoader(train_dl, partial(preprocess, rank))
  # Only rank 0 gets a validation loader; other ranks skip validation.
  if rank == 0:
    val_dl = DataLoader(val_ds, batch_size=args.batch_size, pin_memory=True)
    val_dl = WrappedDataLoader(val_dl, partial(preprocess, rank))
  else:
    val_dl = None
  opt = model.module.get_optim(args.lr, 1)
  # Step interval chosen so the lr decays to roughly 5e-4 of its initial
  # value over args.epochs epochs.
  step_epoch = max(args.epochs // int(math.log(5e-4, args.scheduler_gamma)), 1)
  scheduler = optim.lr_scheduler.StepLR(opt,
    step_epoch, gamma=args.scheduler_gamma)
  fit(args, model, opt, scheduler,
    train_dl, msi_folder, im_filenames, val_dl, rank=rank)

  if not args.predict:
    # generate video
    if True:
      # generate spiral path video
      ts = np.array([t.numpy() for t in t_targs])
      radius_xyz = np.percentile(np.abs(ts), 90, 0)
      radius_xyz[2] *= 0.8
      cams = generate_spiral_cams2(
        radius_xyz,
        np.array([0.0, 0.0, -dmin]),
        np.array([0.0, 1.0, 0.0]),
        zrate=1, rots=4, N=400
      )
    else:
      # generate sphere path video
      cams = generate_sphere_cams(pt.eye(3), pt.zeros(3))

    video_ds = VideoDataSet(cams)
    video_sampler = DistributedSampler(
      video_ds, num_replicas=world_size, rank=rank)
    video_dl = DataLoader(video_ds, batch_size=args.batch_size,
      sampler=video_sampler, pin_memory=True)

    output_video_folder = msi_folder / 'video'
    output_video_folder.mkdir(parents=True, exist_ok=True)
    model.eval()
    for rb, tb, ib in video_dl:
      for r, t, i in zip(rb, tb, ib):
        xb = [pixel_rays_targs[0], r, t]
        # NOTE(review): tensors are placed on GPU 0 while the model lives on
        # ``rank`` — confirm this is intended for multi-GPU rendering.
        xb = [v.unsqueeze(0).cuda(0) for v in xb]
        with pt.no_grad():
          im_result = patch_render(model, xb[0], xb[1], xb[2],
            args.sample_ray_N * 2).cpu().detach().numpy()
          im_result = im_result.reshape(h, w, -1)
        im_result = (im_result * 255 + 0.5).astype(np.uint8)
        im_result = cv2.cvtColor(im_result, cv2.COLOR_RGB2BGR)
        cv2.imwrite(str(output_video_folder / f'{i:04d}.jpg'), im_result)
        print(f'rendering {i:04d}', end='\r')
    # Stitch the rendered frames into an mp4 when ffmpeg is available.
    if shutil.which('ffmpeg') is not None:
      os.system(f'ffmpeg -i {output_video_folder}/%04d.jpg -c:v libx264 \
        -pix_fmt yuv420p -vf pad="width=ceil(iw/2)*2:height=ceil(ih/2)*2" \
        -y {output_video_folder}/video.mp4')

  dist.destroy_process_group()

def main():
  """Resolve the output layout, back up the code, and spawn one worker per GPU."""
  # ``args`` is the module-level namespace parsed in the __main__ block.
  base = Path(args.data_folder) / 'output'
  image_folder = base / 'mvs'
  sfm_data_file = base / 'reconstruction_sequential/sfm_data_all.json'
  msi_folder = base / 'msi'
  msi_folder.mkdir(parents=True, exist_ok=True)
  if not args.predict:
    backup_code(msi_folder / 'code')

  mp.spawn(
    run,
    args=(args, sfm_data_file, msi_folder, image_folder),
    nprocs=args.gpus)

if __name__ == '__main__':
  # Command-line interface; the resulting ``args`` namespace is read as a
  # module-level global by main().
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_folder', required=True,
    help='data folder')
  parser.add_argument('--predict', action='store_true',
    help='predict validation images')  # fixed typo: was 'redict'

  parser.add_argument('--cam_pos_shape', choices=['sphere', 'circle', 'others'],
    required=True, help='geometry shape of cameras to fit')

  parser.add_argument('--gradloss', default=0.05, type=float,
    help='weight parameter for loss1')
  parser.add_argument('--tvc', default=0.03, type=float,
    help='weight parameter for loss2')

  parser.add_argument('-g', '--gpus', default=1, type=int,
    help='number of gpus per node')
  parser.add_argument('--epochs', default=1000, type=int, metavar='N',
    help='number of total epochs to run')
  parser.add_argument('--scheduler_gamma', default=0.3, type=float,
    help='multiplicative factor of learning rate decay')
  parser.add_argument('--train_ratio', default=0.875, type=float,
    help='split ratio for train data')
  parser.add_argument('--batch_size', default=1, type=int,
    help='batch size')
  parser.add_argument('--sample_ray_N', default=8000, type=int,
    help='pixel samples within a batch')
  parser.add_argument('--lr', default=0.1, type=float, metavar='Learning Rate',
    help='learning rate')
  args = parser.parse_args()

  # Rendezvous address for torch.distributed (single-node NCCL).
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '29500'
  main()
