# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name:      train
   Description:    NeRF training and rendering script (lego dataset)
   Author:         lth
   Date:           2022/12/5
-------------------------------------------------
   Change Activity:
                   2022/12/5 17:55: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import cv2
import imageio
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from common import PE
from config import GetConfig
from datalist import NeRFDataset
from model import NeRF
from utils import get_rays, raw2output, sample_pdf, pose_spherical

# Best per-epoch mean loss seen so far; Train.train() checkpoints whenever an
# epoch beats it.  Start at +inf so the first completed epoch always saves.
total_loss = float("inf")
# Number of pixel rays sampled from each training image.
N_rays = 4096
# Number of chunks a ray batch is split into (gradient accumulation) to
# bound peak GPU memory.
N = 2


# to8b = lambda x: (255 * np.clip(x, 0, 1)).astype(np.uint8)


def to8b(x):
    """Convert a float image in [0, 1] to uint8 in [0, 255].

    Values outside [0, 1] are clipped before scaling, so the result is
    always a valid 8-bit image.
    """
    clipped = np.clip(x, 0, 1)
    return (clipped * 255).astype(np.uint8)


class Train:
    """NeRF training and rendering driver for the lego dataset.

    Wires together the dataset loaders, the coarse/fine NeRF models and a
    shared Adam optimizer, then provides:
      * work()      -- the full training schedule (epoch loop + periodic renders)
      * train()     -- one epoch of hierarchical-sampling NeRF training
      * inference() -- renders a 360-degree orbit to JPEGs and an mp4 video
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

        # Data loaders; pin_memory only helps when host->GPU copies happen.
        kwargs = {"num_workers": 4, "pin_memory": use_cuda}

        self.train_dataloader = DataLoader(NeRFDataset(base_dir="./lego", mode="train"), batch_size=1, shuffle=True,
                                           **kwargs)
        self.test_dataloader = DataLoader(NeRFDataset(base_dir="./lego", mode="test"), batch_size=1, shuffle=True,
                                          **kwargs)

        # Camera intrinsics (image height, width, focal length) from the dataset.
        hwf = self.train_dataloader.dataset.hwf
        self.H, self.W, self.focal = hwf
        # Pinhole intrinsic matrix; principal point at the image center.
        self.K = np.array(
            [[self.focal, 0, 0.5 * self.W],
             [0, self.focal, 0.5 * self.H],
             [0, 0, 1]]
        )

        # Hierarchical NeRF: the coarse network proposes sample locations,
        # the fine network produces the final render.
        self.model_coarse = NeRF().to(self.device)
        self.model_fine = NeRF().to(self.device)

        if use_cuda:
            self.model_coarse = torch.nn.DataParallel(self.model_coarse, device_ids=range(torch.cuda.device_count()))
            self.model_fine = torch.nn.DataParallel(self.model_fine, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        # NOTE(review): pretrained weights are loaded when resume is *false*,
        # which looks inverted -- confirm the intended semantics of
        # args.resume before flipping this condition.
        if not self.args.resume:
            print("\nload the weight from pretrained-weight file")
            model_dict = self.model_coarse.state_dict()
            checkpoint = torch.load("weights/coarse_best.pth")['model_state_dict']
            model_dict.update(checkpoint)
            self.model_coarse.load_state_dict(model_dict, strict=True)

            model_dict = self.model_fine.state_dict()
            checkpoint = torch.load("weights/fine_best.pth")['model_state_dict']
            model_dict.update(checkpoint)
            self.model_fine.load_state_dict(model_dict, strict=True)

            print("Restoring the weight from pretrained-weight file \nFinished loading the weight\n")

        self.criterion = nn.MSELoss()

        # One optimizer drives both networks.
        self.optimizer = optim.Adam(
            lr=self.args.lr,
            params=[
                {"params": self.model_fine.parameters()},
                {"params": self.model_coarse.parameters()}],
            betas=(0.9, 0.999)
        )
        self.use_fine = True

        # Exponential LR decay, stepped once per epoch in work().
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.999)

    def work(self):
        """Run the full schedule: train every epoch, render an orbit every
        50 epochs, decay the learning rate once per epoch."""
        torch.cuda.empty_cache()
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            if epoch % 50 == 0:
                self.inference()
            self.scheduler.step()
        torch.cuda.empty_cache()
        print("model finish training")

    def train(self, epoch):
        """Train the coarse and fine models for one epoch.

        For each training image: cast a ray per pixel, randomly keep N_rays
        of them, split that batch into N chunks (memory bound), render each
        chunk with the coarse model, importance-resample along each ray,
        render again with the fine model, and minimise the MSE of both
        renders against the ground-truth pixel colours.  Checkpoints when
        the epoch's mean loss beats the global best (``total_loss``).
        """
        self.model_coarse.train()
        self.model_fine.train()

        average_loss = []

        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch: {epoch}/{self.args.epochs}')

        for img, pose in pbar:
            img, pose = img[0].float().to(self.device), pose[0][:3, :4]
            # region  ray sampling
            rays_o, rays_d = get_rays(self.H, self.W, self.K, pose)
            # Early epochs sample only the central crop so the model first
            # fits the object instead of the empty background.
            if epoch <= 100:
                dH = int(self.H // 2 * 0.5)
                dW = int(self.W // 2 * 0.5)
                coords = torch.stack(
                    torch.meshgrid(
                        torch.linspace(self.H // 2 - dH, self.H // 2 + dH - 1, 2 * dH),
                        torch.linspace(self.W // 2 - dW, self.W // 2 + dW - 1, 2 * dW)
                    ), -1)
            else:
                coords = torch.stack(
                    torch.meshgrid(torch.linspace(0, self.H - 1, self.H), torch.linspace(0, self.W - 1, self.W)),
                    -1)
            # Randomly pick N_rays pixels (without replacement) for the batch.
            coords = torch.reshape(coords, [-1, 2])
            select_indexs = np.random.choice(coords.shape[0], size=[N_rays], replace=False)
            select_coords = coords[select_indexs].long()
            rays_o = rays_o[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            rays_d = rays_d[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            batch_rays = torch.stack([rays_o, rays_d], 0)
            target_s = img[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            # endregion

            # Gradient accumulation: each of the N chunks contributes
            # loss / N, backward() accumulates the gradients, and a single
            # optimizer step is taken per image.  (Bug fix: step() used to
            # run inside the chunk loop without re-zeroing, so earlier
            # chunks' gradients were applied repeatedly.)
            self.optimizer.zero_grad()
            for i in range(N):
                target = target_s[i * (N_rays // N):(i + 1) * (N_rays // N), :]
                rays_o, rays_d = batch_rays[:, i * (N_rays // N):(i + 1) * (N_rays // N), :].to(self.device)
                # Unit view directions.  (Bug fix: torch.norm was called with
                # -1 as the *p* argument instead of dim=-1, so directions were
                # not normalised per-ray.)
                view_dir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
                view_dir = torch.reshape(view_dir, [-1, 3]).float()

                rays_o = torch.reshape(rays_o, [-1, 3]).float()
                rays_d = torch.reshape(rays_d, [-1, 3]).float()
                # Depth bounds along every ray (lego scene lies inside [2, 6]).
                near = 2
                far = 6
                near, far = near * torch.ones_like(rays_d[..., :1]), far * torch.ones_like(rays_d[..., :1])
                # Pack per-ray data: origin(3) | direction(3) | near | far | viewdir(3).
                rays = torch.cat([rays_o, rays_d, near, far, view_dir], -1)

                rays_o, rays_d = rays[:, 0:3], rays[:, 3:6]
                view_dir_ = rays[:, -3:]
                bounds = torch.reshape(rays[..., 6:8], [-1, 1, 2])
                near, far = bounds[..., 0], bounds[..., 1]

                # 64 coarse samples per ray.
                t_vals = torch.linspace(0, 1, steps=64).to(self.device)

                linear_display = False
                if not linear_display:
                    # Sample linearly in depth between near and far.
                    z_vals = near * (1. - t_vals) + far * (t_vals)
                else:
                    # Sample linearly in inverse depth (disparity).
                    z_vals = 1. / (1. / near * (1. - t_vals) + 1. / far * (t_vals))
                z_vals = z_vals.to(self.device)
                z_vals = z_vals.expand([(N_rays // N), 64])
                perturb = 0
                if perturb > 0.:
                    # Stratified sampling: jitter each sample inside its bin.
                    mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
                    upper = torch.cat([mids, z_vals[..., -1:]], -1)
                    lower = torch.cat([z_vals[..., :1], mids], -1)
                    t_rand = torch.rand(z_vals.shape)

                    z_vals = lower + (upper - lower) * t_rand
                # 3-D sample positions: o + t * d -> (rays, samples, 3).
                pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]

                view_dir = view_dir_[:, None].expand(pts.shape)
                pts = torch.reshape(pts, [-1, pts.shape[-1]])
                view_dir = torch.reshape(view_dir, [-1, view_dir.shape[-1]])
                # Positional encoding: 10 frequency bands for positions, 4 for
                # view directions.
                pts_encode = PE(pts, 10)
                view_dir_encode = PE(view_dir, 4)

                embedded = torch.cat([pts_encode, view_dir_encode], -1).to(self.device)

                outputs_coarse = self.model_coarse(embedded)
                outputs_coarse = torch.reshape(outputs_coarse, [(N_rays // N), -1, outputs_coarse.shape[-1]])

                rgb_map, disp_map, acc_map, weights, depth_map = raw2output(outputs_coarse, z_vals, rays_d)

                Loss = self.criterion(rgb_map, target) / N
                if self.use_fine:
                    # Importance sampling: draw 128 extra depths from the PDF
                    # implied by the coarse weights and render the fine model
                    # on the combined, sorted sample set.
                    z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
                    z_samples = sample_pdf(z_vals_mid, weights[..., 1:-1], 128, det=(perturb == 0.), )
                    # Detach: no gradient flows through the resampling depths.
                    z_samples = z_samples.detach()
                    z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
                    pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
                    view_dir = view_dir_[:, None].expand(pts.shape)
                    pts = torch.reshape(pts, [-1, pts.shape[-1]])
                    view_dir = torch.reshape(view_dir, [-1, view_dir.shape[-1]])
                    pts_encode = PE(pts, 10)
                    view_dir_encode = PE(view_dir, 4)
                    embedded = torch.cat([pts_encode, view_dir_encode], -1).to(self.device)
                    outputs_fine = self.model_fine(embedded)
                    outputs_fine = torch.reshape(outputs_fine, [(N_rays // N), -1, outputs_fine.shape[-1]])
                    rgb0_map, disp0_map, acc0_map, weights0, depth0_map = raw2output(outputs_fine, z_vals, rays_d)
                    Loss += self.criterion(rgb0_map, target) / N

                Loss.backward()
                average_loss.append(Loss.item())
            # Single parameter update per image, after all chunks accumulated.
            self.optimizer.step()

            pbar.set_description(
                f'Train Epoch: {epoch}/{self.args.epochs} '
                f' train_loss: {np.mean(average_loss)} '
                f' learning_rate: {self.optimizer.state_dict()["param_groups"][0]["lr"]}'
            )

        # Checkpoint whenever this epoch's mean loss beats the best so far.
        global total_loss
        if np.mean(average_loss) < total_loss:
            # NOTE(review): with DataParallel these state_dicts carry a
            # "module." key prefix; the loader in __init__ targets the
            # wrapped models too, so the formats match -- but the files are
            # not directly loadable on a CPU-only setup.
            torch.save({
                'model_state_dict': self.model_coarse.state_dict(),
            },
                './weights/coarse_best.pth')
            torch.save({
                'model_state_dict': self.model_fine.state_dict(),
            },
                './weights/fine_best.pth')
            print("model saved")
            total_loss = np.mean(average_loss)

    @staticmethod
    def _render_chunks(model, pts, view_dir, chunk):
        """Positional-encode (pts, view_dir) and run `model` in chunks of
        `chunk` samples, concatenating the raw outputs.

        Uses a stepped range so a non-divisible remainder is still rendered
        (the old floor-division loop silently dropped it).
        """
        out = []
        for start in range(0, pts.shape[0], chunk):
            pts_encode = PE(pts[start:start + chunk], 10)
            view_dir_encode = PE(view_dir[start:start + chunk], 4)
            embedded = torch.cat([pts_encode, view_dir_encode], -1)
            out.append(model(embedded))
        return torch.cat(out)

    @torch.no_grad()
    def inference(self):
        """Render a 360-degree orbit around the object.

        Writes one JPEG per pose into to2gif/ and the whole orbit to
        rgb.mp4.  Sample points are fed through the networks in fixed-size
        chunks to bound memory.
        """
        # Samples per network call (8 full coarse images' worth of rays).
        inference_N_Ray = self.H * self.W * 8

        self.model_fine.eval()
        self.model_coarse.eval()

        # 40 camera poses on a circle: elevation -30 degrees, radius 4.
        render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, 40 + 1)[:-1]],
                                   0)

        rgbs = []
        disps = []
        for c2w in tqdm(render_poses):
            rays_o, rays_d = get_rays(self.H, self.W, self.K, c2w)
            rays_o, rays_d = rays_o.to(self.device), rays_d.to(self.device)
            # Unit view directions (same dim=-1 bug fix as in train()).
            view_dir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
            view_dir = torch.reshape(view_dir, [-1, 3]).float()
            rays_o = torch.reshape(rays_o, [-1, 3]).float()
            rays_d = torch.reshape(rays_d, [-1, 3]).float()
            near = 2
            far = 6
            near, far = torch.reshape(near * torch.ones_like(rays_d[..., :1]), [-1, 1]), torch.reshape(
                far * torch.ones_like(rays_d[..., :1]), [-1, 1])
            rays = torch.cat([rays_o, rays_d, near, far, view_dir], -1)

            rays_o, rays_d = rays[:, 0:3], rays[:, 3:6]
            view_dir_ = rays[:, -3:]
            bounds = torch.reshape(rays[..., 6:8], [-1, 1, 2])
            near, far = bounds[..., 0], bounds[..., 1]
            # 32 coarse samples per ray, linear in depth.
            t_vals = torch.linspace(0, 1, steps=32).to(self.device)
            z_vals = near * (1. - t_vals) + far * t_vals
            pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]

            view_dir = view_dir_[:, None].expand(pts.shape)
            pts = torch.reshape(pts, [-1, pts.shape[-1]])
            view_dir = torch.reshape(view_dir, [-1, view_dir.shape[-1]])

            # Coarse pass: (H*W, 32, 4) raw -> rgb/disp/weights per ray.
            outputs_coarse = self._render_chunks(self.model_coarse, pts, view_dir, inference_N_Ray)
            outputs_coarse = torch.reshape(outputs_coarse, [self.H * self.W, -1, outputs_coarse.shape[-1]])
            rgb_map, disp_map, acc_map, weights, depth_map = raw2output(outputs_coarse, z_vals, rays_d)

            # Fine pass: importance-resample 128 extra depths from the coarse
            # weights, then render the fine model on the sorted union.
            perturb = 0
            z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
            z_samples = sample_pdf(z_vals_mid, weights[..., 1:-1], 128, det=(perturb == 0.), )
            z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
            pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
            view_dir = view_dir_[:, None].expand(pts.shape)
            pts = torch.reshape(pts, [-1, pts.shape[-1]])
            view_dir = torch.reshape(view_dir, [-1, view_dir.shape[-1]])

            outputs_fine = self._render_chunks(self.model_fine, pts, view_dir, inference_N_Ray)
            outputs_fine = torch.reshape(outputs_fine, [self.H * self.W, -1, outputs_fine.shape[-1]])
            rgb0_map, disp0_map, acc0_map, weights0, depth0_map = raw2output(outputs_fine, z_vals, rays_d)

            # Bug fix: keep the *fine* render -- previously the coarse maps
            # were appended even though the fine pass had just been computed.
            rgbs.append(rgb0_map.cpu().numpy())
            disps.append(disp0_map.cpu().numpy())

        frames = []
        for index, rgb in enumerate(rgbs):
            frame = to8b(rgb.reshape((self.H, self.W, 3)))
            frames.append(frame)
            # Bug fix: cv2 expects BGR channel order; to8b output is RGB.
            cv2.imwrite("to2gif/" + str(index).zfill(4) + ".jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        # Bug fix: write the video once with all (H, W, 3) uint8 frames --
        # previously mimwrite ran per frame on a channel-first (3, H, W)
        # array, which imageio treats as three grayscale frames.
        imageio.mimwrite("rgb.mp4", frames, fps=30, quality=8)


if __name__ == "__main__":
    # Build the full pipeline (loads datasets, models and any pretrained
    # weights) ...
    model = Train()
    # ... then render the orbit only.
    # NOTE(review): despite the file name, no training runs here -- training
    # would be model.work().  Confirm which entry point is intended.
    model.inference()
