#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

from argparse import ArgumentParser, Namespace
from pathlib import Path
from pprint import pprint
from random import shuffle
from typing import Callable, Dict, List, Optional

import os
import torch
from torch import Tensor
from torchvision.utils import save_image
from tqdm import tqdm

from gaussian_renderer import render, network_gui
from scene import Scene
from hparam import HyperParams
from utils.camera_utils import Camera
from utils.image_utils import psnr, soft_combine_freqs_torch
from utils.general_utils import safe_state
from utils.loss_utils import l1_loss, ssim
from utils.training_utils import init_log
from utils.system_utils import mkdir_p

    
def network_gui_handle(render_func: Callable, scene: Scene, steps: int):
    '''Serve one round of requests from the interactive network GUI viewer.

    Connects lazily, then loops: receive a (possibly custom) camera from the
    viewer, render the currently active gaussians for it, and send the image
    back. Exits the loop either when the viewer allows training to resume
    (``do_training``) or when the connection drops.

    Args:
        render_func: rendering callable with the same signature as ``render``.
        scene: active Scene; ``scene.hp`` may be mutated by viewer toggles
               (``compute_cov3D_python`` is written from the received packet).
        steps: current training iteration, used for the keep-alive check.
    '''
    if network_gui.conn is None:
        network_gui.try_connect()
    while network_gui.conn is not None:
        try:
            hp = scene.hp
            net_image_bytes = None
            custom_cam, do_training, _, hp.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
            if custom_cam is not None:
                # Render the currently active gaussian set (.cur_gaussian) for the viewer
                rendered = render_func(scene.cur_gaussian, custom_cam, scene.background, scaling_modifer)['render']
                # Viewer expects HWC uint8 bytes in [0, 255]
                net_image_bytes = memoryview((torch.clamp(rendered, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
            network_gui.send(net_image_bytes, hp.source_path)
            if do_training and (steps < int(hp.iterations) or not keep_alive):
                break
        except Exception:
            # Best-effort: any GUI/transport error drops the connection,
            # which also terminates this loop.
            network_gui.conn = None

def training(args: Namespace, hp: HyperParams):
    '''Run the full training loop: setup, per-step optimization, periodic eval/save.

    Args:
        args: command-line flags (network GUI address, debug trigger, logging).
        hp: hyper-parameters controlling the scene, schedules and loss weights.
    '''
    # --- Log & bookkeeping ---
    if args.network_gui: network_gui.init(args.ip, args.port)
    writer = init_log(args, hp)
    start_steps = 0

    # --- Model ---
    scene = Scene(hp)
    for freq_idx in scene.all_gaussians.keys():
        gaussians = scene.activate_gaussian(freq_idx)
        gaussians.training_setup()
        if hp.load:
            # NOTE(review): called once per frequency band; presumably
            # load_checkpoint restores the currently-active gaussians — confirm
            # it is not whole-scene (then one call outside the loop would do).
            start_steps = scene.load_checkpoint(hp.load)

    # --- Train ---
    viewpoint_stack: Optional[List[Camera]] = None  # refilled + reshuffled when exhausted
    start_steps += 1
    pbar = tqdm(range(start_steps, hp.iterations + 1), desc='Training progress')

    # NOTE(review): when hp.random_background is set, the background is sampled
    # once for the whole run, not per iteration — confirm this is intended.
    background = scene.random_background() if hp.random_background else scene.background

    for steps in pbar:
        # Debug / interactive viewer hooks
        if steps == args.debug_from: hp.debug = True
        if args.network_gui: network_gui_handle(render, scene, steps)

        # Pick a random camera; refill the stack with a fresh shuffle when empty
        if not viewpoint_stack:
            viewpoint_stack = scene.get_train_cameras().copy()
            shuffle(viewpoint_stack)
        viewpoint_cam = viewpoint_stack.pop()

        # Per-frequency training first, joint (combined-loss) training afterwards
        if steps < hp.joint_train_from_iter:
            separate_training(hp, scene, viewpoint_cam, background, writer, steps, pbar)
        else:
            joint_training(hp, scene, viewpoint_cam, background, writer, steps, pbar)

        # Periodic evaluation / checkpointing
        with torch.no_grad():
            if steps in hp.test_iterations:
                test(scene, steps)
            if steps in hp.save_iterations:
                print(f'[ITER {steps}] Saving Gaussians')
                scene.save_gaussian(steps)
    pbar.close()
                            
def separate_training(hp, scene, viewpoint_cam, background, writer, steps, pbar):
    '''One optimization step where each frequency band is trained independently.

    For every gaussian set in the scene: render from ``viewpoint_cam``, compute
    an L1 + D-SSIM loss against that band's ground-truth image, backprop, run
    densification bookkeeping, and step that band's optimizer.

    Args:
        hp: hyper-parameters (loss weights, densification schedule).
        scene: Scene holding one gaussian model per frequency band.
        viewpoint_cam: training camera; provides ``freq_images[freq_idx]``.
        background: background color tensor passed to the renderer.
        writer: tensorboard-style writer for per-band scalars.
        steps: current iteration number.
        pbar: tqdm progress bar (postfix updated every 10 steps).
    '''
    Ll1_list, psnr_list, n_points = [], [], []
    freq_images = []  # interleaved [render_0, gt_0, render_1, gt_1, ...] for the vis strip
    for freq_idx in scene.all_gaussians.keys():
        # Decay learning rate; bump SH degree every 1000 steps
        gaussians = scene.activate_gaussian(freq_idx)
        gaussians.update_learning_rate(steps)
        if steps % 1000 == 0:
            gaussians.oneupSHdegree()

        # Render
        render_pkg = render(gaussians, viewpoint_cam, background)
        image = render_pkg['render']                        # [C=3, H, W]
        viewspace_points = render_pkg['viewspace_points']   # [P, pos=3]
        visibility_filter = render_pkg['visibility_filter'] # [P]
        radii = render_pkg['radii']

        # Per-band loss: weighted L1 + D-SSIM against the band's ground truth
        gt_image = viewpoint_cam.freq_images[freq_idx].cuda()
        Ll1 = l1_loss(image, gt_image)
        Lssim = ssim(image, gt_image)
        loss = (1.0 - hp.lambda_dssim) * Ll1 + hp.lambda_dssim * (1.0 - Lssim)
        psnr_ = psnr(gt_image, image).mean().item()

        freq_images.append(image)
        freq_images.append(gt_image)
        Ll1_list.append(Ll1)
        psnr_list.append(psnr_)
        n_points.append(gaussians.n_points)

        writer.add_scalar(f'train/freq_{freq_idx}_Ll1', Ll1, global_step=steps)
        writer.add_scalar(f'train/freq_{freq_idx}_psnr', psnr_, global_step=steps)

        loss.backward()

        with torch.no_grad():
            if steps < hp.densify_until_iter:
                # Keep track of max radii in image-space for pruning
                gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
                gaussians.add_densification_stats(viewspace_points, visibility_filter)

                if steps > hp.densify_from_iter and steps % hp.densification_interval == 0:
                    gaussians.densify_and_prune(scene.cameras_extent, steps)

                if steps % hp.opacity_reset_interval == 0 or (hp.white_background and steps == hp.densify_from_iter):
                    gaussians.reset_opacity()

            gaussians.optimizer.step()
            gaussians.optimizer.zero_grad(set_to_none=True)

    with torch.no_grad():
        # NOTE(review): the postfix and the combined vis image below assume
        # exactly two frequency bands (indices 0 and 1) — confirm.
        if steps % 10 == 0:
            pbar.set_postfix({'L|H-Loss': f'{Ll1_list[0].item():.5f}|{Ll1_list[1].item():.5f}|',
                              'PSNR': f"{psnr_list[0]:.4f}|{psnr_list[1]:.4f}|",
                              'Pts': f"{n_points[0]}|{n_points[1]}|"})
        if steps % 1000 == 0:
            # Save a horizontal strip: [render_0 | gt_0 | render_1 | gt_1 | render_0+render_1]
            mkdir_p(scene.model_p / 'vis', parents=True)
            freq_images.append(freq_images[0]+freq_images[2])
            save_image(torch.cat(freq_images, -1), scene.model_p / 'vis' / f"{steps:05d}.png")

def joint_training(hp, scene, viewpoint_cam, background, writer, steps, pbar):
    '''One optimization step with a joint loss across all frequency bands.

    Renders every band, accumulates per-band L1 + D-SSIM losses, adds a
    combined-image loss (sum of band renders vs. the full ground truth), and
    backpropagates the weighted total through all bands at once before stepping
    each band's optimizer and running densification bookkeeping.

    Args:
        hp: hyper-parameters (lambda_low / lambda_high band weights, schedules).
        scene: Scene holding one gaussian model per frequency band.
        viewpoint_cam: training camera; provides ``freq_images`` and ``gt_image``.
        background: background color tensor passed to the renderer.
        writer: tensorboard-style writer for scalars.
        steps: current iteration number.
        pbar: tqdm progress bar (postfix updated every 10 steps).
    '''
    n_points = []
    freq_images, Ll1_list, freq_losses, psnr_list = [], [], [], []
    viewspace_points_list, visibility_filter_list, radii_list = [], [], []
    gt_list = []

    for freq_idx in scene.all_gaussians.keys():
        # Decay learning rate; bump SH degree every 1000 steps
        gaussians = scene.activate_gaussian(freq_idx)
        gaussians.update_learning_rate(steps)
        if steps % 1000 == 0:
            gaussians.oneupSHdegree()

        # Render; keep densification inputs per band for the post-backward pass
        render_pkg = render(gaussians, viewpoint_cam, background)
        image = render_pkg['render']
        viewspace_points_list.append(render_pkg['viewspace_points'])
        visibility_filter_list.append(render_pkg['visibility_filter'])
        radii_list.append(render_pkg['radii'])

        # Per-band loss: weighted L1 + D-SSIM against the band's ground truth
        gt_image = viewpoint_cam.freq_images[freq_idx].cuda()
        Ll1 = l1_loss(image, gt_image)
        Lssim = ssim(image, gt_image)
        loss = (1.0 - hp.lambda_dssim) * Ll1 + hp.lambda_dssim * (1.0 - Lssim)
        psnr_ = psnr(gt_image, image).mean().item()

        freq_images.append(image)
        gt_list.append(gt_image)
        freq_losses.append(loss)
        Ll1_list.append(Ll1)
        psnr_list.append(psnr_)
        n_points.append(gaussians.n_points)

        writer.add_scalar(f'train/freq_{freq_idx}_Ll1', Ll1, global_step=steps)
        writer.add_scalar(f'train/freq_{freq_idx}_psnr', psnr_, global_step=steps)

    # Combined loss: the sum of band renders should match the full ground truth
    gt_image = viewpoint_cam.gt_image.cuda()
    combined_image = sum(freq_images)
    gt_list.append(gt_image)
    freq_images.append(combined_image)
    Ll1 = l1_loss(combined_image, gt_image)
    Lssim = ssim(combined_image, gt_image)
    psnr_ = psnr(combined_image, gt_image).mean().item()
    combined_freq_loss = (1.0 - hp.lambda_dssim) * Ll1 + hp.lambda_dssim * (1.0 - Lssim)

    writer.add_scalar('train/freq_combine_Ll1', Ll1, global_step=steps)
    writer.add_scalar('train/freq_combine_psnr', psnr_, global_step=steps)

    # NOTE(review): assumes exactly two bands (low=0, high=1) — confirm.
    loss = hp.lambda_low * freq_losses[0] + hp.lambda_high * freq_losses[1] + \
            (1 - hp.lambda_low - hp.lambda_high) * combined_freq_loss

    # Single backward pass through every band's render graph
    loss.backward()

    if steps % 1000 == 0:
        # Save a 2-row strip: top = band renders + combined, bottom = band GTs + full GT
        mkdir_p(scene.model_p / 'vis', parents=True)
        gt = torch.cat(gt_list, -1)
        ge = torch.cat(freq_images, -1)
        save_image(torch.cat([ge, gt], 1), scene.model_p / 'vis' / f"{steps:05d}.png")

    # Per-band optimizer step and densification bookkeeping (after grad)
    with torch.no_grad():
        for freq_idx in scene.all_gaussians.keys():
            gaussians = scene.activate_gaussian(freq_idx)
            # Optimizer step
            if steps < hp.iterations:
                gaussians.optimizer.step()
                gaussians.optimizer.zero_grad(set_to_none=True)

            # NOTE(review): unlike separate_training, densification runs AFTER
            # the optimizer step here (stats come from the non-optimized
            # viewspace tensors, so grads survive zero_grad) — confirm intended.
            if steps < hp.densify_until_iter:
                # Keep track of max radii in image-space for pruning
                visibility_filter = visibility_filter_list[freq_idx]
                viewspace_points = viewspace_points_list[freq_idx]
                radii = radii_list[freq_idx]
                gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
                gaussians.add_densification_stats(viewspace_points, visibility_filter)

                if steps > hp.densify_from_iter and steps % hp.densification_interval == 0:
                    gaussians.densify_and_prune(scene.cameras_extent, steps)

                if steps % hp.opacity_reset_interval == 0 or (hp.white_background and steps == hp.densify_from_iter):
                    gaussians.reset_opacity()

        if steps % 10 == 0:
            pbar.set_postfix({'L|H|HF-Loss': f'{Ll1_list[0].item():.5f}|{Ll1_list[1].item():.5f}|{Ll1.item():.5f}',
                              'PSNR': f"{psnr_list[0]:.4f}|{psnr_list[1]:.4f}|{psnr_:.4f}",
                              'Pts': f"{n_points[0]}|{n_points[1]}|{sum(n_points)}"})

@torch.no_grad()
def test(scene, steps):
    '''Evaluate the combined (sum-of-bands) render on train and test cameras.

    For a fixed subset of train cameras and all test cameras: sum the per-band
    renders, clip to [0, 1], save side-by-side images under
    ``{model_p}/eval_{steps}/``, and print mean L1 / PSNR per split.

    Args:
        scene: Scene holding per-band gaussians, cameras and output paths.
        steps: current iteration, used to name the eval directory.
    '''
    validation_configs = {'train': [scene.get_train_cameras()[idx % len(scene.get_train_cameras())] for idx in range(5, 30, 5)],
                          'test': scene.get_test_cameras()}

    save_dir = scene.model_p / f'eval_{steps}'
    mkdir_p(save_dir, parents=True)

    torch.cuda.empty_cache()
    for split, cameras in validation_configs.items():
        if not cameras: continue  # skip empty splits (also guards the division below)

        l1_test, psnr_test, total = 0.0, 0.0, 0
        for idx, viewpoint in tqdm(enumerate(cameras), desc=f"Evaluating {split}..."):
            # Render every band and sum into the full-spectrum image
            freq_images = []
            for freq_idx in scene.all_gaussians.keys():
                gaussians = scene.activate_gaussian(freq_idx)
                freq_images.append(render(gaussians, viewpoint, scene.background)['render'])

            combine_image = torch.clip(sum(freq_images), 0.0, 1.0)
            gt = viewpoint.gt_image.cuda()
            save_image(torch.cat([combine_image, gt], -1), save_dir / f"{split}_{idx:05d}.png")
            l1_test += l1_loss(combine_image, gt).mean()
            psnr_test += psnr(combine_image, gt).mean()
            total += 1

        l1_test /= total
        psnr_test /= total
        print(f'============= Evaluating {split}: L1 {l1_test}, PSNR {psnr_test} ================')
    torch.cuda.empty_cache()
    

if __name__ == "__main__":
    # Set up command line argument parser
    parser = ArgumentParser(description="Training script parameters")
    parser.add_argument('--network_gui', action='store_true')
    parser.add_argument('--ip', type=str, default='127.0.0.1')
    parser.add_argument('--port', type=int, default=6009)
    parser.add_argument('--task', type=str, default='test')
    parser.add_argument('--debug_from', type=int, default=-1)
    parser.add_argument('--detect_anomaly', action='store_true')
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--nolog', action='store_true', help='no tensorboard logs')
    args, _ = parser.parse_known_args()

    # Initialize system state (RNG)
    safe_state(args.quiet)
    torch.autograd.set_detect_anomaly(args.detect_anomaly)
    
    hp = HyperParams()
    hp.send_to(parser)
    args = parser.parse_args()
    hp.extract_from(args)
    print('Training:', hp.model_path)

    print('Hparams:')
    pprint(vars(hp))
    training(args, hp)

    # Done
    print()
    print('Training complete.')
