import os
import sys
os.environ["GLOG_v"] = "3"

import time
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import mindspore
import mindspore as md
import mindspore.ops.operations as P
import numpy as np
from mindspore import Tensor, nn, ops
from tqdm import tqdm
from data.load_scannet import load_scannet_data
from nerf_utils import *
from models import VolumeRenderer

# Parse runtime configuration (device, paths, network/runner hyper-parameters)
config = get_config()

# environment config defaults — the values actually used below come from `config`
mode = "GRAPH_MODE"  # execution-mode name, resolved via getattr(md.context, ...) later
seed = 3407
device_id = 0
device = "GPU"
init = False  # when True, context_setup() becomes a no-op (context already configured)
# Set up running device
def context_setup(idx, device='GPU', mode=md.context.GRAPH_MODE):
    """Configure the global MindSpore execution context once.

    Args:
        idx: target device id.
        device: device target name; "CPU" is rejected because the renderer
            relies on the `cumprod` op, which has no CPU kernel here.
        mode: md.context execution mode (GRAPH_MODE / PYNATIVE_MODE).
    """
    if not init:  # module-level flag: skip when the context is already set up
        if device == "CPU":
            raise NotImplementedError("`cumprod` ops does not support CPU")
        md.context.set_context(mode=mode, device_target=device, device_id=idx)

md.set_seed(seed)
# Resolve the mode string (e.g. "GRAPH_MODE") to the md.context constant
context_setup(config.device_id, config.device, getattr(md.context, config.mode))

# Root output directory for all experiments
base_dir = config.base_dir
if not os.path.exists(base_dir):
    os.makedirs(base_dir)

# Experiment name
exp_name = config.dataset_type + "_" + config.name
# Highest trailing "_<n>" among existing run directories whose name
# contains exp_name (0 when none exist)
exp_num = max([int(fn.split("_")[-1]) for fn in os.listdir(base_dir) if fn.find(exp_name) >= 0] + [0])
if config.no_reload:
    # Start a fresh run directory instead of resuming the latest one
    exp_num += 1

# Output directory for this specific run
out_dir = os.path.join(base_dir, exp_name + "_" + str(exp_num))
os.makedirs(out_dir, exist_ok=True)

# data config
half_res = False  # NOTE(review): not referenced below — confirm whether still needed
testskip = 8  # NOTE(review): not referenced below — confirm whether still needed
white_bkgd = False  # composite RGBA over a white background if True
render_test = True  # render the held-out split instead of the loaded render path

# network config
chunk = config.chunk  # rays processed per renderer chunk
cap_n_samples = config.cap_n_samples  # coarse samples per ray
cap_n_importance = config.cap_n_importance  # fine (importance) samples per ray
netchunk = config.net_chunk  # points pushed through the MLP at once
lrate = config.l_rate  # initial learning rate

# runner config
cap_n_iters = config.cap_n_iters  # total training iterations
cap_n_rand = config.cap_n_rand  # rays sampled per training step
lrate_decay = config.l_rate_decay  # lr decay horizon in thousands of steps (see train_net)

# steps config
i_ckpt = config.i_ckpt  # checkpoint every i_ckpt steps
i_testset = config.i_testset  # run the test set every i_testset steps
i_print = config.i_print  # NOTE(review): not referenced below — confirm whether still needed

# load data
images, poses, render_poses, depths, viewinfo, hwf, i_split = load_scannet_data(config.data_dir)
print('Loaded scannet', images.shape, render_poses.shape, hwf, config.data_dir)
# train / first-view-synthesis / novel-view-synthesis index splits
i_train, i_fvs, i_nvs = i_split

# Scene depth bounds used by the volume renderer
near = 0.1
far = 3.0

def trans_t(t):
    """Return a 4x4 float32 matrix translating by ``t`` along the z axis."""
    mat = np.eye(4, dtype=np.float32)
    mat[2, 3] = t
    return mat


def rot_phi(phi):
    """Return a 4x4 float32 rotation about the x axis by ``phi`` radians."""
    c, s = np.cos(phi), np.sin(phi)
    mat = np.eye(4, dtype=np.float32)
    mat[1, 1], mat[1, 2] = c, -s
    mat[2, 1], mat[2, 2] = s, c
    return mat


def rot_theta(th):
    """Return a 4x4 float32 rotation about the y axis by ``th`` radians.

    Sign convention matches the original: row 0 is [cos, 0, -sin, 0].
    """
    c, s = np.cos(th), np.sin(th)
    mat = np.eye(4, dtype=np.float32)
    mat[0, 0], mat[0, 2] = c, -s
    mat[2, 0], mat[2, 2] = s, c
    return mat


def pose_spherical(theta, phi, radius):
    """Build a camera-to-world pose on a sphere of the given radius.

    Translates out by ``radius``, tilts by ``phi`` degrees, rotates by
    ``theta`` degrees, then applies a fixed axis-swap matrix.
    """
    axis_swap = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
                         dtype=np.float32)
    c2w = rot_phi(phi / 180. * np.pi) @ trans_t(radius)
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    return axis_swap @ c2w


if white_bkgd:
    # Composite RGBA onto a white background using the alpha channel
    images = images[..., :3] * images[..., -1:] + (1. - images[..., -1:])
else:
    # Drop the alpha channel and keep RGB only
    images = images[..., :3]

if render_test:
    # Render the held-out poses (i_fvs split) instead of the loaded path
    render_poses = poses[i_fvs.tolist()]

# Cast intrinsics to right types
cap_h, cap_w, focal = hwf
cap_h, cap_w = int(cap_h), int(cap_w)

hwf = [cap_h, cap_w, focal]

class NeRFMLP(nn.Cell):
    """
    NeRF MLP architecture.

    A stack of ``cap_d`` dense layers over embedded point positions, with
    the raw point input re-concatenated after every layer index listed in
    ``skips``. With ``use_view_dirs``, density (alpha) is predicted from
    point features alone and RGB from features fused with the view
    direction; otherwise a single linear head emits ``output_ch`` channels.

    Args:
        cap_d (int, optional): Model depth. Default: 8.
        cap_w (int, optional): Model width. Default: 256.
        input_ch (int, optional): Input channel. Default: 3.
        input_ch_views (int, optional): Input view channel. Default: 3.
        output_ch (int, optional): Output channel. Default: 4.
        skips (tuple, optional): Skip connection layer indexes. Default: (4,).
        use_view_dirs (bool, optional): Use view directions or not. Default: False.

    Inputs:
        - **x** (Tensor) - Query tensors. points and view directions (..., 6).

    Outputs:
        Tensor, query features (..., feature_dims).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> model = NeRFMLP()
        >>> inputs = md.numpy.randn(1, 3)
        >>> outputs = model(inputs)
        [[0.2384 0.8456 0.6845 0.1584]]
    """

    def __init__(
            self,
            cap_d=8,
            cap_w=256,
            input_ch=3,
            input_ch_views=3,
            output_ch=4,
            # BUGFIX: was `skips=(4)`, which is the int 4, not a tuple —
            # `i in self.skips` / `i not in self.skips` would raise TypeError.
            skips=(4,),
            use_view_dirs=False,
    ):
        super().__init__()
        self.cap_d = cap_d
        self.cap_w = cap_w
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_view_dirs = use_view_dirs

        # Layer i + 1 consumes cap_w (+ input_ch when i is a skip layer,
        # since construct concatenates the raw points after layer i).
        self.pts_linears = nn.CellList([nn.Dense(in_channels=input_ch, out_channels=cap_w)] + [
            nn.Dense(in_channels=cap_w, out_channels=cap_w) if i not in
            self.skips else nn.Dense(in_channels=cap_w + input_ch, out_channels=cap_w) for i in range(cap_d - 1)
        ])

        # Fuses point features with the view-direction embedding
        self.views_linears = nn.CellList([nn.Dense(in_channels=input_ch_views + cap_w, out_channels=cap_w // 2)])

        if use_view_dirs:
            self.feature_linear = nn.Dense(in_channels=cap_w, out_channels=cap_w)
            self.alpha_linear = nn.Dense(in_channels=cap_w, out_channels=1)
            self.rgb_linear = nn.Dense(in_channels=cap_w // 2, out_channels=3)
        else:
            self.output_linear = nn.Dense(in_channels=cap_w, out_channels=output_ch)

    def construct(self, x):
        """Split x into points/views, run the MLP, and return the head output."""
        input_pts, input_views = x[..., :self.input_ch], x[..., self.input_ch:]
        h = input_pts
        for i, _ in enumerate(self.pts_linears):
            h = self.pts_linears[i](h)
            h = P.ReLU()(h)
            if i in self.skips:
                # Skip connection: re-inject the raw point input
                h = P.Concat(-1)([input_pts, h])

        if self.use_view_dirs:
            alpha = self.alpha_linear(h)
            feature = self.feature_linear(h)
            h = P.Concat(-1)([feature, input_views])

            for i, _ in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = P.ReLU()(h)

            rgb = self.rgb_linear(h)
            outputs = P.Concat(-1)([rgb, alpha])
        else:
            outputs = self.output_linear(h)

        return outputs

class RendererWithCriterion(nn.Cell):
    """
    Wrap a volume renderer with a reconstruction criterion.

    Args:
        renderer (nn.Cell): Renderer producing (fine, coarse) RGB maps.
        loss_fn (nn.Cell, optional): Criterion applied to both maps. Default: nn.MSELoss().

    Inputs:
        - **rays** (Tensor) - Batch of rays to render.
        - **gt** (Tensor) - Ground-truth colors for those rays.

    Outputs:
        Tensor, summed fine + coarse loss for one forward pass.
    """

    def __init__(self, renderer, loss_fn=nn.MSELoss()):
        """Store the renderer and the criterion."""
        super().__init__()
        self.renderer = renderer
        self.loss_fn = loss_fn
        print("RendererWithCriterion: loss_fn: ", loss_fn)

    def construct(self, rays, gt):
        """Render the rays and accumulate the criterion on both outputs."""
        fine_map, coarse_map = self.renderer(rays)
        fine_loss = self.loss_fn(fine_map, gt)
        coarse_loss = self.loss_fn(coarse_map, gt)
        return fine_loss + coarse_loss


# Create nerf model (networks, embedders, and a possibly-restored optimizer)
start_iter, optimizer, model_coarse, model_fine, embed_fn, embeddirs_fn = create_nerf(config, out_dir)
# Training steps
global_steps = start_iter
# Create volume renderer
renderer = VolumeRenderer(
    config.chunk,
    config.cap_n_samples,
    config.cap_n_importance,
    config.net_chunk,
    config.white_bkgd,
    model_coarse,
    model_fine,
    embed_fn,
    embeddirs_fn,
    near,
    far,
)

# Run the MLPs in half precision
renderer.model_coarse.to_float(md.dtype.float16)
if renderer.model_fine is not None:
    renderer.model_fine.to_float(md.dtype.float16)

renderer_with_criteron = RendererWithCriterion(renderer)
# NOTE(review): this rebinding discards the optimizer returned by
# create_nerf above (and any restored optimizer state) — confirm intended.
optimizer = nn.Adam(params=renderer.trainable_params(),
                    learning_rate=lrate,
                    beta1=0.9,
                    beta2=0.999)

# Graph-mode dispatcher used by CustomTrainOneStepCell to map the scaling
# function over a tuple of gradient tensors
grad_scale = ops.MultitypeFuncGraph("grad_scale")
@grad_scale.register("Tensor", "Tensor")
def gradient_scale(scale, grad):
    """Multiply one gradient tensor by `scale`, cast to the gradient's dtype."""
    return grad * ops.cast(scale, ops.dtype(grad))

class CustomTrainOneStepCell(nn.TrainOneStepCell):
    """
    One training step with static loss scaling.

    The backward pass is seeded with ``sens`` (the loss scale) so float16
    gradients stay representable, then every gradient is multiplied by
    ``1 / sens`` to restore its true magnitude before the optimizer update.

    Args:
        network (nn.Cell): Network returning the (unscaled) loss.
        optimizer (nn.Optimizer): Optimizer applied to the gradients.
        sens (float, optional): Static loss-scale value. Default: 1.0.
    """

    def __init__(self, network, optimizer, sens=1.0):
        super(CustomTrainOneStepCell, self).__init__(network, optimizer, sens)
        self.hyper_map = ops.HyperMap()
        # Reciprocal of the loss scale, applied elementwise to each gradient
        self.reciprocal_sense = Tensor(1 / sens, mindspore.float32)

    def scale_grad(self, gradients):
        """Undo the loss scaling: multiply every gradient by 1 / sens."""
        gradients = self.hyper_map(ops.partial(grad_scale, self.reciprocal_sense), gradients)
        return gradients

    def construct(self, *inputs):
        """Forward, scaled backward, unscale, reduce, and apply the update."""
        loss = self.network(*inputs)
        sens = ops.fill(loss.dtype, loss.shape, self.sens)
        # calculate gradients, the sens will equal to the loss_scale
        grads = self.grad(self.network, self.weights)(*inputs, sens)
        # gradients / loss_scale
        grads = self.scale_grad(grads)
        # reduce gradients in distributed scenarios
        grads = self.grad_reducer(grads)
        # depend() orders the optimizer update before the loss is returned
        loss = ops.depend(loss, self.optimizer(grads))
        return loss

def generate_rays(h, w, f, pose):
    '''
    Given an image plane, generate rays from the camera origin to each pixel on the image plane.

    Arguments:
        h: height of the image plane.
        w: width of the image plane.
        f: focal length of the image plane.
        pose: the extrinsic parameters of the camera. (3, 4) or (4, 4)

    Returns:
        A tuple: origins of rays, directions of rays
    '''
    unsqueeze = md.ops.ExpandDims()

    # Per-pixel coordinates on the normalized (focal-scaled) image plane:
    # x grows to the right, y grows upward, the camera looks along -z.
    xs = unsqueeze(
        md.numpy.linspace(-1.0 * w / 2, w - 1 - w / 2, w) / f,
        0).repeat(h, axis=0)  # (h, w)
    ys = unsqueeze(
        -1.0 * md.numpy.linspace(-1.0 * h / 2, h - 1 - h / 2, h) / f,
        1).repeat(w, axis=1)  # (h, w)

    # One camera-space direction vector per pixel
    dirs = md.numpy.stack([xs, ys, -1.0 * md.numpy.ones_like(xs)],
                          axis=-1)  # (h, w, 3)
    # Rotate directions into world space with the camera-to-world rotation
    dirs = md.numpy.sum(unsqueeze(dirs, 2) * pose[:3, :3], axis=-1)
    # Every ray starts at the camera position
    origins = pose[:3, -1].expand_as(dirs)  # (h, w, 3)

    return origins, dirs.astype(pose.dtype)  # (h, w, 3), (h, w, 3)


def train_net(iter_, train_renderer, optimizer, rays, gt):
    '''
    Run one optimization step and decay the learning rate.

    Arguments:
        iter_: current iteration count (drives the lr decay).
        train_renderer: the train-one-step cell wrapping renderer + loss.
        optimizer: the network optimizer whose learning rate is decayed.
        rays: a batch of rays for training. (#rays * #samples, 6)
        gt: the groundtruth colors for the sampled rays.

    Returns:
        A tuple: (MSE loss, PSNR).
    '''
    loss = train_renderer(rays, gt)

    # Exponential decay: lr = lrate * 0.1 ** (iter_ / (lrate_decay * 1000))
    decay_rate = 0.1
    decay_steps = lrate_decay * 1000
    new_lrate = lrate * (decay_rate**(iter_ / decay_steps))
    # NOTE(review): rebinds optimizer.learning_rate to a fresh Parameter
    # every step — confirm MindSpore honors this in GRAPH_MODE.
    optimizer.learning_rate = md.Parameter(new_lrate)

    return float(loss), float(psnr_from_mse(loss))


def test_net(img_h,
             img_w,
             focal,
             renderer,
             test_poses,
             gt=None,
             on_progress=None,
             on_complete=None):
    '''
    Test the network and generate results.

    Arguments:
        img_h: height of image plane.
        img_w: width of image plane.
        focal: focal length.
        renderer: the volume renderer.
        test_poses: poses used to test the network. (#poses, 4, 4)
        gt: optional groundtruth images; when given, MSE and PSNR are computed.
        on_progress: a callback (or a [prediction_cb, groundtruth_cb] pair)
            invoked per generated result.
        on_complete: a callback function invoked after generating all results.

    Returns:
        A tuple: (Mean test time, MSE loss, PSNR, list of 8-bit result images).
    '''

    rgb_maps = []  # predicted images as numpy arrays
    loss_ls = []   # per-view MSE (only populated when gt is given)
    psnr_ls = []   # per-view PSNR (only populated when gt is given)
    time_ls = []   # per-view render time in seconds

    reshape_op = md.ops.Reshape()
    stack_op = md.ops.Stack(axis=0)

    image_list = []

    for j, test_pose in enumerate(test_poses):
        t0 = time.time()

        # Generate rays for all pixels
        ray_oris, ray_dirs = generate_rays(img_h, img_w, focal, test_pose)
        ray_oris = reshape_op(ray_oris, (-1, 3))
        ray_dirs = reshape_op(ray_dirs, (-1, 3))
        test_batch_rays = stack_op([ray_oris, ray_dirs])

        # Retrieve testing results
        rgb_map, _ = renderer.inference(test_batch_rays)
        rgb_map = reshape_op(rgb_map, (img_h, img_w, 3))
        rgb_maps.append(rgb_map.asnumpy())

        # If given groundtruth, compute MSE and PSNR
        if gt is not None:
            loss = mse(rgb_map, gt[j])
            psnr = psnr_from_mse(loss)
            loss_ls.append(float(loss))
            psnr_ls.append(float(psnr))

        time_ls.append(time.time() - t0)

        # Handle each testing result
        if on_progress:
            if isinstance(on_progress, list):
                # A callback pair: index 0 gets predictions, index 1 groundtruth
                on_progress[0](j, rgb_maps[-1])
                if gt is not None:
                    on_progress[1](j, gt[j].asnumpy())
            else:
                on_progress(j, rgb_maps[-1])

        image_list.append(to8b(rgb_maps[-1]))

    # Handle all testing results
    if on_complete:
        on_complete(np.stack(rgb_maps, 0))

    # Fall back to zeros so the means below are defined even for empty lists
    if not loss_ls:
        loss_ls = [0.0]
    if not psnr_ls:
        psnr_ls = [0.0]
    if not time_ls:
        time_ls = [0.0]

    return np.mean(time_ls), np.mean(loss_ls), np.mean(psnr_ls), image_list


def to8b(x):
    """Map a float image in [0, 1] to uint8 [0, 255], clipping out-of-range values."""
    clipped = np.clip(x, 0.0, 1.0)
    return (clipped * 255).astype(np.uint8)


def mse(im1, im2):
    '''
    MSE between two images: mean of the squared per-element differences.
    '''
    diff = im1 - im2
    return md.numpy.mean(diff**2)


# Base-10 log denominator kept as a Tensor so psnr_from_mse stays in graph ops
psnr_from_mse_base = md.Tensor([10.0])


def psnr_from_mse(v):
    '''
    Convert MSE to PSNR: -10 * log10(mse), assuming a peak signal value of 1.0.
    '''
    return -10.0 * (md.numpy.log(v) / md.numpy.log(psnr_from_mse_base))


def sample_grid_2d(cap_h, cap_w, cap_n):
    """
    Draw cap_n distinct cell coordinates from a cap_h x cap_w mesh grid.

    Args:
        cap_h (int): Height of the mesh grid.
        cap_w (int): Width of the mesh grid.
        cap_n (int): The number of samples (clamped to cap_h * cap_w).

    Returns:
        Tuple of 2 Tensor, sampled rows and sampled columns.

        - **select_coords_x** (Tensor) - Sampled rows.
        - **select_coords_y** (Tensor) - Sampled columns.
    """
    # Never request more cells than the grid contains
    cap_n = min(cap_n, cap_w * cap_h)

    # (cap_h, cap_w, 2) grid where each cell holds its own (row, col) pair
    grid = md.ops.Stack(-1)(
        md.numpy.meshgrid(
            md.numpy.linspace(0, cap_h - 1, cap_h),
            md.numpy.linspace(0, cap_w - 1, cap_w),
            indexing="ij",
        ))
    # Flatten to (cap_h * cap_w, 2) so cells can be addressed linearly
    flat_coords = md.ops.Reshape()(grid, (-1, 2))

    # Draw cap_n distinct linear indexes, then gather their coordinates
    chosen = np.random.choice(flat_coords.shape[0],
                              size=[cap_n],
                              replace=False)
    picked = flat_coords[chosen.tolist()].astype("int32")

    return picked[:, 0], picked[:, 1]


def sample_along_rays(near,
                      far,
                      cap_cap_n_samples,
                      lin_disp=False,
                      perturb=True):
    """
    Sample points along rays.

    Args:
        near (Tensor): A vector containing nearest point for each ray. (cap_n_rays).
        far (Tensor): A vector containing furthest point for each ray. (cap_n_rays).
        cap_cap_n_samples (int): The number of sampled points for each ray.
        lin_disp (bool): True for sample linearly in inverse depth rather than in depth (used for some datasets).
        perturb (bool): True for stratified sampling. False for uniform sampling.

    Returns:
        Tensor, samples where j-th component of the i-th row is the j-th sampled position along the i-th ray.
    """
    # The number of rays
    cap_n_rays = near.shape[0]

    # Uniform interpolation coefficients in [0, 1], one per sample
    t_vals = md.numpy.linspace(0.0, 1.0, num=cap_cap_n_samples)
    if not lin_disp:
        # Sample linearly in depth between near and far
        # NOTE(review): assumes near/far broadcast against (cap_cap_n_samples,)
        # — e.g. scalar or shape (cap_n_rays, 1); confirm at the call site.
        z_vals = near * (1.0 - t_vals) + far * t_vals
    else:
        # Sample linearly in inverse depth (disparity)
        z_vals = 1.0 / (1.0 / near * (1.0 - t_vals) + 1.0 / far * t_vals)

    # Replicate/broadcast the depths to one row per ray
    expand_op = md.ops.BroadcastTo((cap_n_rays, cap_cap_n_samples))
    z_vals = expand_op(z_vals)

    if perturb:
        # Get intervals between samples
        mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
        cat_op = md.ops.Concat(-1)
        upper = cat_op([mids, z_vals[..., -1:]])
        lower = cat_op([z_vals[..., :1], mids])
        # Stratified samples in those intervals
        t_rand = md.numpy.rand(z_vals.shape)
        z_vals = lower + (upper - lower) * t_rand

    return z_vals


def sample_pdf(bins, weights, cap_cap_n_samples, det=False):
    """
    Inverse-transform sampling from a piecewise-constant PDF over bins.

    Args:
        bins (Tensor): Bin edge positions along each ray.
        weights (Tensor): The estimated (unnormalized) per-bin weights.
        cap_cap_n_samples (int): The number of points to be sampled.
        det (bool, optional): Deterministic (uniform u) run or not. Default: False.

    Returns:
        Tensor, sampled positions drawn proportionally to the weights.
    """
    # Avoid zero weights so the normalization below never divides by zero
    weights = weights + 1e-5
    pdf = weights / md.numpy.sum(weights, -1, keepdims=True)
    cdf = md.numpy.cumsum(pdf, -1)
    # Prepend 0 so the CDF covers the full [0, 1] interval
    cdf = md.ops.Concat(-1)([md.numpy.zeros_like(cdf[..., :1]), cdf])

    # Take uniform samples (evenly spaced when det, random otherwise)
    temp_shape = cdf.shape[:-1]
    cap_cap_n_samples_new = cap_cap_n_samples
    temp_shape_new = list(temp_shape) + [cap_cap_n_samples_new]
    if det:
        u = md.numpy.linspace(0.0, 1.0, num=cap_cap_n_samples)
        expand_op = md.ops.BroadcastTo(temp_shape_new)
        u = expand_op(u)
    else:
        u = md.numpy.rand(temp_shape_new)

    # Invert CDF: find the bin each u falls into
    indexes = nd_searchsorted(cdf, u)

    # Clamp the bracketing indexes to valid range: below = idx-1, above = idx
    below = md.numpy.maximum(md.numpy.zeros_like(indexes - 1), indexes - 1)
    above = md.numpy.minimum((cdf.shape[-1] - 1) * md.numpy.ones_like(indexes),
                             indexes)
    indexes_g = md.ops.Stack(axis=-1)([below, above])

    # Gather the CDF values and bin edges at the bracketing indexes
    matched_shape = (indexes_g.shape[0], indexes_g.shape[1], cdf.shape[-1])
    gather_op = md.ops.GatherD()
    unsqueeze_op = md.ops.ExpandDims()
    expand_op = md.ops.BroadcastTo(matched_shape)
    cdf_g = gather_op(expand_op(unsqueeze_op(cdf, 1)), 2, indexes_g)
    bins_g = gather_op(expand_op(unsqueeze_op(bins, 1)), 2, indexes_g)

    # Linearly interpolate within the chosen bin; guard tiny denominators
    denom = cdf_g[..., 1] - cdf_g[..., 0]
    denom = md.numpy.where(denom < 1e-5, md.numpy.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])

    return samples


def nd_searchsorted(cdf, u):
    """Row-wise right-sided searchsorted over the trailing dimension.

    Args:
        cdf (Tensor): Monotone CDF values, shape (..., n_bins).
        u (Tensor): Query values, shape (..., n_queries).

    Returns:
        Tensor, insertion indexes with shape (*cdf.shape[:-1], n_queries).
    """
    lead_shape = cdf.shape[:-1]
    # Collapse the leading dims so each row can be searched independently
    flat_cdf = cdf.view(-1, cdf.shape[-1])
    flat_u = u.view(-1, u.shape[-1])

    rows = [
        flat_cdf[row].searchsorted(flat_u[row], side="right")
        for row in range(flat_cdf.shape[0])
    ]
    stacked = md.ops.Stack(axis=0)(rows)
    return stacked.view(*lead_shape, u.shape[-1])

# Static loss scale keeping float16 gradients representable
loss_scale = 1024.0
train_renderer = CustomTrainOneStepCell(renderer_with_criteron, optimizer, loss_scale)
train_renderer.set_train()

train_image_list = []
with tqdm(range(1, cap_n_iters + 1)) as pbar:
    # Resume the progress bar from a possibly-restored iteration count
    pbar.n = start_iter

    for i in pbar:
        # Show progress
        pbar.set_description(f'Iter {global_steps + 1:d}')
        pbar.update()

        # Start time of the current iteration
        time0 = time.time()

        # Pick a random training view
        img_i = int(np.random.choice(i_train))

        target = images[img_i]
        pose = poses[img_i, :3, :4]

        # NOTE(review): if cap_n_rand were None, batch_rays/target_s would be
        # unbound and train_net below would raise NameError — the config is
        # assumed to always provide cap_n_rand.
        if cap_n_rand is not None:
            rays_o, rays_d = generate_rays(
                cap_h, cap_w, focal,
                pose)  # (cap_h, cap_w, 3), (cap_h, cap_w, 3)
            # Randomly sample cap_n_rand pixels of this view
            sampled_rows, sampled_cols = sample_grid_2d(
                cap_h, cap_w, cap_n_rand)
            rays_o = rays_o[sampled_rows, sampled_cols]  # (cap_n_rand, 3)
            rays_d = rays_d[sampled_rows, sampled_cols]  # (cap_n_rand, 3)

            batch_rays = md.ops.Stack(axis=0)([rays_o, rays_d])
            target_s = target[sampled_rows, sampled_cols]  # (cap_n_rand, 3)

        loss, psnr = train_net(global_steps, train_renderer, optimizer,
                               batch_rays, target_s)

        pbar.set_postfix(time=time.time() - time0, loss=loss, psnr=psnr)

        # Save training states
        if (global_steps + 1) % i_ckpt == 0:
            path = os.path.join(out_dir, f"{global_steps + 1:06d}.tar")

            md.save_checkpoint(
                save_obj=renderer,
                ckpt_file_name=path,
                append_dict={"global_steps": global_steps},
                async_save=True,
            )
            pbar.write(f"Saved checkpoints at {path}")

        # Save testing results
        if (global_steps + 1) % i_testset == 0:
            test_save_dir = os.path.join(out_dir, f"test_{global_steps + 1:06d}")
            os.makedirs(test_save_dir, exist_ok=True)

            pbar.write(f"Testing (iter={global_steps + 1}):")

            # BUGFIX: test_net returns a 4-tuple (time, loss, psnr, images);
            # the previous 3-name unpack raised ValueError at the first test.
            test_time, test_loss, test_psnr, _ = test_net(
                cap_h,
                cap_w,
                focal,
                renderer,
                md.Tensor(poses[i_fvs.tolist()]),
                images[i_fvs.tolist()],
                on_progress=lambda j, img: save_image(j, img, test_save_dir),  # pylint: disable=cell-var-from-loop
                on_complete=lambda imgs: save_video(global_steps + 1, imgs, test_save_dir),  # pylint: disable=cell-var-from-loop
            )

            pbar.write(
                f"Testing results: [ Mean Time: {test_time:.4f}s, Loss: {test_loss:.4f}, PSNR: {test_psnr:.4f} ]")

        global_steps += 1

    print("training done")