#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:      inference
   Description:    Render an orbit video of a trained NeRF scene.
   Author:         lth
   Date:           2022/12/15
-------------------------------------------------
   Change Activity:
                   2022/12/15 1:30: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import cv2
import imageio
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from common import PE
from config import GetConfig
from datalist import NeRFDataset
from model import NeRF
from utils import get_rays, raw2output, sample_pdf, pose_spherical

# Best-loss sentinel, initialised impossibly high (unused in this file;
# kept in case it is imported elsewhere).
total_loss = 9999999999999999999999999
# Ray-batch size and coarse/fine factor (unused in this file).
N_rays = 4096
N = 2


def to8b(x):
    """Map a float image in [0, 1] to uint8 in [0, 255] (values clipped)."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)


class Train:
    """Builds the coarse/fine NeRF pair and renders a spherical orbit video.

    Despite its name, this class is used for inference here: ``__init__``
    loads data, models, and pretrained weights; ``inference`` renders a
    40-frame 360-degree fly-around and writes JPEG frames plus an MP4.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

        # Data loaders: pin host memory only when batches move to a GPU.
        kwargs = {"num_workers": 4, "pin_memory": use_cuda}
        self.train_dataloader = DataLoader(NeRFDataset(base_dir="./lego", mode="train"),
                                           batch_size=1, shuffle=True, **kwargs)
        self.test_dataloader = DataLoader(NeRFDataset(base_dir="./lego", mode="test"),
                                          batch_size=1, shuffle=True, **kwargs)

        # Camera intrinsics from the dataset: (height, width, focal length).
        self.H, self.W, self.focal = self.train_dataloader.dataset.hwf
        self.K = np.array(
            [[self.focal, 0, 0.5 * self.W],
             [0, self.focal, 0.5 * self.H],
             [0, 0, 1]]
        )

        # Coarse network for stratified samples; fine network for the
        # importance-sampled points.
        self.model_coarse = NeRF().to(self.device)
        self.model_fine = NeRF().to(self.device)

        if use_cuda:
            device_ids = list(range(torch.cuda.device_count()))
            self.model_coarse = torch.nn.DataParallel(self.model_coarse, device_ids=device_ids)
            self.model_fine = torch.nn.DataParallel(self.model_fine, device_ids=device_ids)
            cudnn.benchmark = True
            cudnn.enabled = True

        if not self.args.resume:
            # NOTE(review): weights are loaded AFTER the DataParallel wrap, so
            # on CUDA the checkpoints must carry "module."-prefixed keys —
            # confirm against how they were saved.
            print("\nload the weight from pretrained-weight file")
            for model, path in ((self.model_coarse, "weights/coarse_best.pth"),
                                (self.model_fine, "weights/fine_best.pth")):
                state = model.state_dict()
                # map_location lets CPU-only machines read GPU-saved files.
                checkpoint = torch.load(path, map_location=self.device)['model_state_dict']
                state.update(checkpoint)
                model.load_state_dict(state, strict=True)
            print("Restoring the weight from pretrained-weight file \nFinished loading the weight\n")

        self.criterion = nn.MSELoss()

        # Optimizer/scheduler are unused by `inference` but kept so this
        # object stays interchangeable with the training entry point.
        self.optimizer = optim.Adam(
            lr=self.args.lr,
            params=[
                {"params": self.model_fine.parameters()},
                {"params": self.model_coarse.parameters()}],
            betas=(0.9, 0.999)
        )
        self.use_fine = True

        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.999)

    def _run_network(self, model, pts, view_dir, chunk):
        """Run `model` over flattened sample points in memory-bounded chunks.

        pts / view_dir: (n, 3) sample points and matching unit view
        directions. Returns the concatenated raw predictions, shape (n, C).
        """
        outputs = []
        # Iterate by offset so a non-divisible tail chunk is still processed
        # (the original `range(n // chunk)` silently dropped the remainder).
        for start in range(0, pts.shape[0], chunk):
            end = start + chunk
            pts_encode = PE(pts[start:end], 10)      # positional encoding, L=10
            dir_encode = PE(view_dir[start:end], 4)  # view-dir encoding, L=4
            outputs.append(model(torch.cat([pts_encode, dir_encode], -1)))
        return torch.cat(outputs)

    @torch.no_grad()
    def inference(self):
        """Render 40 poses on a circular orbit and save frames plus a video.

        Writes per-frame JPEGs to ``to2gif/`` and the video to ``rgb.mp4``.
        """
        # Number of sample points pushed through the network per forward pass.
        chunk = self.H * self.W * 8

        self.model_fine.eval()
        self.model_coarse.eval()

        # 40 camera-to-world poses: full azimuth sweep at -30 deg elevation,
        # radius 4 (the duplicate 180-degree endpoint is dropped).
        render_poses = torch.stack(
            [pose_spherical(angle, -30.0, 4.0)
             for angle in np.linspace(-180, 180, 40 + 1)[:-1]], 0)

        rgbs = []
        disps = []
        for c2w in tqdm(render_poses):
            rays_o, rays_d = get_rays(self.H, self.W, self.K, c2w)
            rays_o = torch.reshape(rays_o, [-1, 3]).float().to(self.device)
            rays_d = torch.reshape(rays_d, [-1, 3]).float().to(self.device)
            # Unit view directions. Fix: `dim` must be passed by keyword —
            # the original passed -1 positionally, where torch.norm reads it
            # as the norm order `p` and reduces over the WHOLE tensor,
            # dividing every ray by one scalar instead of its own length.
            view_dir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)

            # Scene depth bounds for the synthetic lego scene, per ray.
            near = 2 * torch.ones_like(rays_d[..., :1])  # (n_rays, 1)
            far = 6 * torch.ones_like(rays_d[..., :1])

            # Coarse pass: 32 evenly spaced depths per ray in [near, far].
            t_vals = torch.linspace(0, 1, steps=32).to(self.device)
            z_vals = near * (1. - t_vals) + far * t_vals  # (n_rays, 32)
            pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
            dirs = view_dir[:, None].expand(pts.shape)
            raw = self._run_network(self.model_coarse,
                                    pts.reshape(-1, 3),
                                    dirs.reshape(-1, 3),
                                    chunk)
            raw = raw.reshape(self.H * self.W, -1, raw.shape[-1])
            # Only the per-sample weights are needed to drive fine sampling.
            _, _, _, weights, _ = raw2output(raw, z_vals, rays_d)

            # Fine pass: add 128 importance samples drawn from the coarse
            # weight distribution (det=True: no perturbation at test time,
            # equivalent to the original's det=(perturb == 0.) with perturb=0).
            z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
            z_samples = sample_pdf(z_vals_mid, weights[..., 1:-1], 128, det=True)
            z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
            pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
            dirs = view_dir[:, None].expand(pts.shape)
            raw = self._run_network(self.model_fine,
                                    pts.reshape(-1, 3),
                                    dirs.reshape(-1, 3),
                                    chunk)
            raw = raw.reshape(self.H * self.W, -1, raw.shape[-1])
            rgb_map, disp_map, _, _, _ = raw2output(raw, z_vals, rays_d)

            rgbs.append(rgb_map.cpu().numpy())
            disps.append(disp_map.cpu().numpy())

        frames = []
        for index, rgb in enumerate(rgbs):
            # Fix: use the dataset's H/W instead of a hard-coded 400x400.
            img = to8b(rgb.reshape((self.H, self.W, 3)))
            # OpenCV expects BGR channel order; flip from the model's RGB so
            # the saved JPEGs are not channel-swapped.
            cv2.imwrite("to2gif/" + str(index).zfill(4) + ".jpg", img[:, :, ::-1])
            frames.append(img)
        # Fix: write the video ONCE from the full frame stack. The original
        # called mimwrite inside the loop, overwriting rgb.mp4 every frame
        # with the 3 channel planes of a single transposed image.
        imageio.mimwrite("rgb.mp4", np.stack(frames), fps=30, quality=8)


if __name__ == "__main__":
    # Build the pipeline and render the orbit video.
    runner = Train()
    runner.inference()
