# sys.path.append("/home/yangchen/projects/other_experments/di-fusion")
import logging
import os
import time
from collections import defaultdict
from pathlib import Path

import imageio
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from plyfile import PlyData, PlyElement
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid, save_image
from tqdm import tqdm

from dataset.rgbd_dataset import ShapeViewDataset, Voxelizer
from fairnr.clib import (aabb_ray_intersect, inverse_cdf_sampling,
                         svo_ray_intersect, triangle_ray_intersect,
                         uniform_ray_sampling)
from network.loss import SRNLossCriterion
from network.radiance_field import RaidanceField
from network.renderer import VolumeRenderer
from options import config_parser
from utils import data_utils, exp_util, motion_util

MAX_DEPTH = 10000.0
TINY = 1e-9

class RGBD_NeRF(nn.Module):
    """NSVF-style neural renderer over a sparse voxel grid built from RGB-D data.

    Combines a radiance field (``RaidanceField``) with a volume renderer
    (``VolumeRenderer``) and implements the ray/voxel intersection, pixel
    sub-sampling, ray-marching and background fill-in pipeline (adapted from
    fairnr's NSVF model).
    """

    def __init__(self, args, device='cpu', **kwargs):
        super().__init__()
        self.args = args
        self.renderer = VolumeRenderer(args)
        self.field = RaidanceField(args)
        # ray-marching step length is a fixed fraction of the voxel edge
        self.step_size = args.raymarching_stepsize_ratio * args.voxel_size
        self.voxel_size = args.voxel_size
        ### init output directories (one sub-directory per experiment)
        self.log_path = os.path.join(args.log_path, args.exp_name)
        self.checkpoint_path = os.path.join(args.checkpoint_path, args.exp_name)
        os.makedirs(self.log_path, exist_ok=True)
        os.makedirs(self.checkpoint_path, exist_ok=True)

    def ray_processing(self, sample, encoder_states, is_train=True):
        """Run the full per-view rendering pipeline.

        Intersects one ray per pixel with the voxel grid, optionally keeps a
        random subset of pixels during training, marches the hitting rays
        through the field, and fills missed rays in with background values.

        Args:
            sample: dict with at least 'uv', 'size', 'extrinsics', 'intrinsics'.
            encoder_states: voxel-grid tensors ('voxel_vertex_idx',
                'voxel_center_xyz', ...).
            is_train: when True, sub-sample `args.pixel_per_view` pixels.

        Returns:
            (sample, all_results): the sample updated with the sampled rays and
            a dict of render outputs ('colors', 'depths', 'missed', ...).
        """
        ray_start, ray_dir, intersection_outputs, hits, S, V, P = self.ray_intersect(sample, encoder_states)
        # conduct pixel sampling (training only)
        if is_train:
            tmp_uv = sample['uv'].unsqueeze(0).unsqueeze(0) # 1, 1, 2, P
            mask = hits.reshape(S, V, P) # SVP

            sampled_uv, sampled_masks = self.sample_pixels(tmp_uv, sample['size'], mask=mask, S=S, V=V, P=P)
            sampled_masks = sampled_masks.reshape(S, -1).bool() # SVHW
            hits, sampled_masks = hits[sampled_masks].reshape(S, -1), sampled_masks.unsqueeze(-1)
            intersection_outputs = {name: outs[sampled_masks.expand_as(outs)].reshape(S, -1, outs.size(-1))
                for name, outs in intersection_outputs.items()}
            ray_start = ray_start[sampled_masks.expand_as(ray_start)].reshape(S, -1, 3)
            ray_dir = ray_dir[sampled_masks.expand_as(ray_dir)].reshape(S, -1, 3)
        else:
            sampled_uv = None
        min_depth = intersection_outputs['min_depth']
        max_depth = intersection_outputs['max_depth']
        pts_idx = intersection_outputs['intersected_voxel_idx']
        # in-voxel travel distance per hit; padded (-1) slots contribute zero
        dists = (max_depth - min_depth).masked_fill(pts_idx.eq(-1), 0)
        intersection_outputs['probs'] = dists / dists.sum(dim=-1, keepdim=True)
        intersection_outputs['steps'] = dists.sum(-1) / self.step_size
        # save the original rays (before dropping misses) for the fill-in step
        ray_start0 = ray_start.reshape(-1, 3).clone() # 2048, 3
        ray_dir0 = ray_dir.reshape(-1, 3).clone()

        P = ray_dir.shape[1] // V
        # BUGFIX: default container so `postprocessing` still receives a dict
        # (all missing keys -> None -> background fill-in) when no ray hits any
        # voxel; previously `all_results` was undefined in that case (NameError).
        all_results = defaultdict(lambda: None)

        if hits.sum() > 0:
            intersection_outputs = {
                name: outs[hits] for name, outs in intersection_outputs.items()}
            ray_start, ray_dir = ray_start[hits], ray_dir[hits]
            encoder_states = {name: s.reshape(-1, s.size(-1)) if s is not None else None
                for name, s in encoder_states.items()}

            ray_samples, all_results = self.raymarching(
                ray_start, ray_dir, intersection_outputs, encoder_states)

        hits = hits.reshape(-1)
        all_results = self.postprocessing(ray_start0, ray_dir0, all_results, hits, (S, V, P))

        if sampled_uv is not None:
            all_results['sampled_uv'] = sampled_uv

        all_results['other_logs'] = self.add_other_logs(all_results)
        all_results['samples'] = {
            'sampled_uv': all_results.get('sampled_uv', sample['uv'][None,None,:,:,None,None]),
            'ray_start': ray_start,
            'ray_dir': ray_dir
        }
        sample.update(all_results['samples'])
        return sample, all_results

    def forward(self, sample, encoder_states):
        """Training forward pass: alias for :meth:`ray_processing`."""
        sample, all_results = self.ray_processing(sample, encoder_states)
        return sample, all_results

    @torch.no_grad()
    def valid_step(self, sample, encoder_states, criterion, step='test'):
        """Render a full view (no pixel sub-sampling), log PSNR and save a
        side-by-side (ground-truth | rendered) JPEG in the log directory."""
        data, net_output = self.ray_processing(sample, encoder_states, is_train=False)
        loss, logging_outputs = criterion.compute_loss(net_output, data)
        logging.info(f"Testing,image:{step},PSNR: {logging_outputs['PSNR']}")
        ## SAVE IMAGES
        gt_img = data_utils.recover_image(data['colors'].reshape(int(sample['size'][0]), int(sample['size'][1]), 3))
        rendered = data_utils.recover_image(net_output['colors'].reshape(int(sample['size'][0]), int(sample['size'][1]), 3))
        vis = torch.cat((gt_img, rendered), dim=1)
        imageio.imwrite(os.path.join(self.log_path, f"epoch_{str(step)}.jpg"), (vis.cpu().numpy()*255.).astype(np.uint8))
        return loss, logging_outputs

    def save_model(self):
        # TODO: unimplemented placeholder; checkpoints are currently written
        # directly from the training script via torch.save(state_dict()).
        pass

    def load_model(self):
        # TODO: unimplemented placeholder; see `args.resume` handling in the
        # training script.
        pass

    def ray_intersect(self, sample, encoder_states):
        """Cast one ray per pixel and intersect them with the voxel AABBs.

        Returns:
            ray_start: (S, V*P, 3) ray origins (camera center, broadcast).
            ray_dir:   (S, V*P, 3) ray directions.
            intersection_outputs: dict of per-ray, front-to-back sorted entry /
                exit depths and voxel indices (missed slots: -1 / MAX_DEPTH).
            hits: (S, V*P) bool, True where the ray touches at least one voxel.
            S, V, P: scene / view / pixel counts (currently S = V = 1).
        """
        pose = torch.inverse(sample['extrinsics'])
        ro = pose[:3,3]  # camera center in world coordinates
        intrinsic = sample['intrinsics']
        rd = motion_util.get_ray_direction(ro, sample['uv'], intrinsic, pose).transpose(1,0) # P, 3
        S = 1
        V = 1 # currently only a single view per batch is supported
        P, _ = rd.shape # e.g. 307200 for a 640x480 image

        point_feats = encoder_states['voxel_vertex_idx'].unsqueeze(0) # (1,N,8)
        point_xyz = encoder_states['voxel_center_xyz'].unsqueeze(0) # (1,N,3)

        # ray-voxel intersection
        ray_start = ro.expand_as(rd).contiguous().view(S, V * P, 3).contiguous()
        ray_dir = rd.reshape(S, V * P, 3).contiguous()

        # NOTE(review): the octree (SVO) path is hard-disabled and references
        # self.max_hits, which is never set on this class; kept for reference.
        if False:  # ray-voxel intersection with SVO
            flatten_centers = encoder_states['voxel_octree_center_xyz']
            flatten_children = encoder_states['voxel_octree_children_idx']
            pts_idx, min_depth, max_depth = svo_ray_intersect(
                self.voxel_size, self.max_hits, flatten_centers, flatten_children,
                ray_start, ray_dir)
        else:   # brute-force intersection against every voxel AABB
            pts_idx, min_depth, max_depth = aabb_ray_intersect(
                self.args.voxel_size, self.args.max_hits, point_xyz, ray_start, ray_dir) # dim=max_hits

        # sort intersections front-to-back; misses are pushed to the end
        min_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        max_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        min_depth, sorted_idx = min_depth.sort(dim=-1)
        max_depth = max_depth.gather(-1, sorted_idx)
        pts_idx = pts_idx.gather(-1, sorted_idx)
        hits = pts_idx.ne(-1).any(-1)  # remove all points that completely miss the object

        intersection_outputs = {
            "min_depth": min_depth,
            "max_depth": max_depth,
            "intersected_voxel_idx": pts_idx
        }
        return ray_start, ray_dir, intersection_outputs, hits, S, V, P

    def ray_sample(self, intersection_outputs):
        """Draw sample points along each ray via inverse-CDF sampling.

        Uses the per-voxel travel-distance distribution ('probs') and step
        budget ('steps') computed in :meth:`ray_processing`; deterministic
        (mid-point) sampling is used in eval mode.
        """
        sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
            intersection_outputs['intersected_voxel_idx'],
            intersection_outputs['min_depth'],
            intersection_outputs['max_depth'],
            intersection_outputs['probs'],
            intersection_outputs['steps'],
            -1, False or (not self.training))
        sampled_dists = sampled_dists.clamp(min=0.0)
        # invalidate padded samples (voxel index -1)
        sampled_depth.masked_fill_(sampled_idx.eq(-1), MAX_DEPTH)
        sampled_dists.masked_fill_(sampled_idx.eq(-1), 0.0)

        samples = {
            'sampled_point_depth': sampled_depth,
            'sampled_point_distance': sampled_dists,
            'sampled_point_voxel_idx': sampled_idx,
        }
        return samples

    def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, pos_feat=None, fine=False):
        """Sample points along the hitting rays and volume-render them.

        Returns (samples, all_results) where all_results is the renderer's
        output dict augmented with the first-sample depth ('voxel_depth').
        """
        samples = self.ray_sample(intersection_outputs)
        all_results = self.renderer.forward_chunk(pos_feat, self.field, ray_start, ray_dir, samples, encoder_states)

        # depth of the first sampled point along each ray
        all_results['voxel_depth'] = samples['sampled_point_depth'][:, 0]
        return samples, all_results

    @torch.no_grad()
    def sample_pixels(self, uv, size, alpha=None, mask=None, **kwargs):
        """Randomly select `args.pixel_per_view` pixels, biased by `mask`.

        Args:
            uv: (1, 1, 2, P) pixel coordinates.
            size: (H, W) image size.
            alpha: optional alpha map, used as mask when `mask` is None.
            mask: (S, V, P) weighting; only masked pixels can be drawn.
            kwargs: must provide S, V, P.

        Returns:
            (sampled_uv, sampled_masks): selected coordinates of shape
            (S, V, 2, num, patch, patch) and the binary (S, V, H, W) mask.
        """
        H, W = int(size[0]), int(size[1])
        S, V, P = kwargs["S"], kwargs["V"], kwargs["P"]

        if mask is None:
            if alpha is not None:
                mask = (alpha > 0)
            else:
                mask = uv.new_ones(S, V, uv.size(-1)).bool()
        mask = mask.float().reshape(S, V, H, W)

        probs = mask / (mask.sum() + 1e-8)
        # placeholder knob: 1.0 => sample strictly on the mask; lowering it
        # would mix in a uniform distribution over all pixels
        sampling_on_mask = 1.
        if sampling_on_mask > 0.0:
            probs = sampling_on_mask * probs + (1 - sampling_on_mask) * 1.0 / (H * W)

        num_pixels = int(self.args.pixel_per_view)
        # patch/skip sampling is currently disabled (both fixed to 1)
        patch_size, skip_size = 1, 1
        C = patch_size * skip_size

        if C > 1:
            probs = probs.reshape(S, V, H // C, C, W // C, C).sum(3).sum(-1)
            num_pixels = num_pixels // patch_size // patch_size

        # draw `num_pixels` distinct pixels via Gumbel top-k on log-probs
        flatten_probs = probs.reshape(S, V, -1)
        sampled_index = motion_util.sampling_without_replacement(torch.log(flatten_probs + TINY), num_pixels)
        sampled_masks = torch.zeros_like(flatten_probs).scatter_(-1, sampled_index, 1).reshape(S, V, H // C, W // C)

        if C > 1:
            sampled_masks = sampled_masks[:, :, :, None, :, None].repeat(
                1, 1, 1, patch_size, 1, patch_size).reshape(S, V, H // skip_size, W // skip_size)
            if skip_size > 1:
                full_datamask = sampled_masks.new_zeros(S, V, skip_size * skip_size, H // skip_size, W // skip_size)
                full_index = torch.randint(skip_size*skip_size, (S, V))
                for i in range(S):
                    for j in range(V):
                        full_datamask[i, j, full_index[i, j]] = sampled_masks[i, j]
                sampled_masks = full_datamask.reshape(
                    S, V, skip_size, skip_size, H // skip_size, W // skip_size).permute(0, 1, 4, 2, 5, 3).reshape(S, V, H, W)

        # gather the uv coordinates of the selected pixels
        X, Y = uv[:,:,0].reshape(S, V, H, W), uv[:,:,1].reshape(S, V, H, W)
        X = X[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
        Y = Y[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
        return torch.cat([X, Y], 2), sampled_masks

    def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
        """Scatter per-hit-ray results back to the full pixel grid and
        composite the background color/depth onto missed rays (NSVF fill-in)."""
        S, V, P = sizes
        fullsize = S * V * P

        all_results['missed'] = data_utils.fill_in((fullsize, ), hits, all_results['missed'], 1.0).view(S, V, P)
        all_results['colors'] = data_utils.fill_in((fullsize, 3), hits, all_results['colors'], 0.0).view(S, V, P, 3)
        all_results['depths'] = data_utils.fill_in((fullsize, ), hits, all_results['depths'], 0.0).view(S, V, P)

        # composite the (possibly learned) background behind transparent rays
        BG_DEPTH = self.field.bg_color.depth
        bg_color = self.field.bg_color(all_results['colors'])
        all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
        all_results['depths'] += all_results['missed'] * BG_DEPTH
        if 'normal' in all_results:
            all_results['normal'] = data_utils.fill_in((fullsize, 3), hits, all_results['normal'], 0.0).view(S, V, P, 3)
        if 'voxel_depth' in all_results:
            all_results['voxel_depth'] = data_utils.fill_in((fullsize, ), hits, all_results['voxel_depth'], BG_DEPTH).view(S, V, P)
        if 'voxel_edges' in all_results:
            all_results['voxel_edges'] = data_utils.fill_in((fullsize, 3), hits, all_results['voxel_edges'], 1.0).view(S, V, P, 3)
        if 'feat_n2' in all_results:
            all_results['feat_n2'] = data_utils.fill_in((fullsize,), hits, all_results['feat_n2'], 0.0).view(S, V, P)
        return all_results

    def add_other_logs(self, all_results):
        """Scalar logs reported alongside the render results."""
        return {'voxs_log': data_utils.item(self.args.voxel_size),
                'stps_log': data_utils.item(self.step_size)}

    def save_images(self, output_files, steps=None, combine_output=True):
        """Assemble previously-saved frame images into mp4 video(s).

        NOTE(review): relies on self.output_dir / self.output_type / self.fps,
        which are not set anywhere in this class — confirm before use.
        """
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        timestamp = time.strftime('%Y-%m-%d.%H-%M-%S', time.localtime(time.time()))
        if steps is not None:
            timestamp = "step_{}.".format(steps) + timestamp

        if not combine_output:
            # one video per output type (avoid shadowing the builtin `type`)
            for out_type in self.output_type:
                images = [imageio.imread(file_path) for file_path in output_files if out_type in file_path]
                imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, out_type, timestamp), images, fps=self.fps, quality=8)
        else:
            # one video with all output types concatenated side by side
            images = [[imageio.imread(file_path) for file_path in output_files if out_type == file_path.split('/')[-2]] for out_type in self.output_type]
            images = [np.concatenate([images[j][i] for j in range(len(images))], 1) for i in range(len(images[0]))]
            imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, 'full', timestamp), images, fps=self.fps, quality=8)

        return timestamp

    @torch.no_grad()
    def extract_mesh(self, encoder_states, th, bits):
        """
        extract triangle-meshes from the implicit field using marching cube algorithm
            Lewiner, Thomas, et al. "Efficient implementation of marching cubes' cases with topological guarantees."
            Journal of graphics tools 8.2 (2003): 1-15.
        """
        # NOTE(review): self.precompute is not defined on this class — confirm
        if encoder_states is None:
            encoder_states = self.precompute(id=None)

        points = encoder_states['voxel_center_xyz']

        # NOTE(review): self.field_fn is not defined on this class — confirm
        scores = self.get_scores(self.field_fn, th=th, bits=bits, encoder_states=encoder_states) # 1812,1000
        coords, residual = motion_util.discretize_points(points, self.voxel_size)
        A, B, C = [s + 1 for s in coords.max(0).values.cpu().tolist()]

        # prepare the dense occupancy grid (1 = empty by default)
        full_grids = points.new_ones(A * B * C, bits ** 3)
        full_grids[coords[:, 0] * B * C + coords[:, 1] * C + coords[:, 2]] = scores
        full_grids = full_grids.reshape(A, B, C, bits, bits, bits)
        full_grids = full_grids.permute(0, 3, 1, 4, 2, 5).reshape(A * bits, B * bits, C * bits)
        full_grids = 1 - full_grids

        # marching cube
        from skimage import measure
        space_step = self.voxel_size.item() / bits
        # BUGFIX: `marching_cubes_lewiner` was removed in scikit-image >= 0.19;
        # prefer it when present (old versions), otherwise fall back to
        # `marching_cubes`, whose default method is already 'lewiner'.
        marching_cubes = getattr(measure, 'marching_cubes_lewiner', None) or measure.marching_cubes
        verts, faces, normals, _ = marching_cubes(
            volume=full_grids.cpu().numpy(), level=0.5,
            spacing=(space_step, space_step, space_step)
        )
        verts += (residual - (self.voxel_size / 2)).cpu().numpy()
        verts = np.array([tuple(a) for a in verts.tolist()], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
        faces = np.array([(a, ) for a in faces.tolist()], dtype=[('vertex_indices', 'i4', (3,))])
        return PlyData([PlyElement.describe(verts, 'vertex'), PlyElement.describe(faces, 'face')])

    def get_scores(self, field_fn, th=0.5, bits=16, encoder_states=None):
        """Evaluate per-voxel occupancy scores (exp(-relu(sigma))) on a
        bits^3 grid of points inside every voxel, chunked to bound memory."""
        if encoder_states is None:
            encoder_states = self.precompute(id=None)

        feats = encoder_states['voxel_vertex_idx']
        points = encoder_states['voxel_center_xyz']
        values = encoder_states['voxel_vertex_emb']
        chunk_size = 64

        def get_scores_once(feats, points, values):
            # sample bits^3 points uniformly inside each voxel
            sampled_xyz = motion_util.offset_points(points, self.voxel_size / 2.0, bits=bits)
            sampled_idx = torch.arange(points.size(0), device=points.device)[:, None].expand(*sampled_xyz.size()[:2])
            sampled_xyz, sampled_idx = sampled_xyz.reshape(-1, 3), sampled_idx.reshape(-1)

            # NOTE(review): self.forward here resolves to RGBD_NeRF.forward
            # (the ray pipeline); in the fairnr original this called the
            # encoder's feature sampler — verify before relying on this path.
            field_inputs = self.forward(
                {'sampled_point_xyz': sampled_xyz,
                 'sampled_point_voxel_idx': sampled_idx,
                 'sampled_point_ray_direction': None,
                 'sampled_point_distance': None},
                {'voxel_vertex_idx': feats,
                 'voxel_center_xyz': points,
                 'voxel_vertex_emb': values})  # get field inputs
            if encoder_states.get('context', None) is not None:
                field_inputs['context'] = encoder_states['context']

            # evaluation with density
            field_outputs = field_fn(field_inputs, outputs=['sigma'])
            free_energy = -torch.relu(field_outputs['sigma']).reshape(-1, bits ** 3)

            # return scores
            return torch.exp(free_energy)

        return torch.cat([get_scores_once(feats[i: i + chunk_size], points[i: i + chunk_size], values)
            for i in range(0, points.size(0), chunk_size)], 0)

if __name__=="__main__":
    logging.basicConfig(level=logging.INFO)
    parser = config_parser()
    args = parser.parse_args()
    logging.info(args)
    device = torch.device("cuda:0")
    writer = SummaryWriter(os.path.join(args.log_path, args.exp_name,"tensorboard"))

    ### init data & model
    views = 1 ## current we only consider views eqs 1
    view_ids = [0, 30, 60, 90] ## specify views for recon
    assert len(view_ids) == args.num_view
    rgbd_dataset = ShapeViewDataset(args.root_path, views, args.num_view, load_depth=True, resolution=args.reso)
    vox = Voxelizer(args, device)

    samples = rgbd_dataset.get_base_sequence(index = 0, view_ids=view_ids)
    data_seq, encoder_states = vox.preprocess(samples)

    trainer = RGBD_NeRF(args)
    if args.resume:
        if args.load_path is not None:
            trainer.load_state_dict(torch.load(args.load_path), strict=True)
        else:
            
            assert os.path.isdir(os.path.join(args.checkpoint_path, args.exp_name))
            load_path = os.path.join(args.checkpoint_path, args.exp_name, "latest.pth.tar")
            logging.info(f"ckpt is not specified, use the latest file {load_path} instead.")
            trainer.load_state_dict(torch.load(load_path), strict=True)
    trainer.to(device)

    criterion = SRNLossCriterion(args)
    optim = torch.optim.Adam(trainer.parameters(), lr=args.lr)
    train_dataloader = torch.utils.data.DataLoader(rgbd_dataset,
                                                    batch_size=4,
                                                    shuffle=True,
                                                    num_workers=0,
                                                    drop_last=True)

    # for i in range(args.num_epoch):  
    #     for data in train_dataloader:
    #         trainer(sample, encoder_states)

    ### demo for samples:
    start=0
    for j in tqdm(range(start, args.num_epoch)):

        trainer.train()
        for i in range(len(data_seq)):
            data, net_output = trainer(data_seq[i],encoder_states)
            loss, logging_outputs = criterion.compute_loss(net_output, data)
            optim.zero_grad()
            loss.backward()
            optim.step()

            # print(loss)
        if not j % args.num_print:
            writer.add_scalar('train_loss/rgb_loss', logging_outputs['color_loss'], j)
            writer.add_scalar('train_loss/depth_loss', logging_outputs['depth_loss'], j)
            writer.add_scalar('train_loss/reg', logging_outputs['reg_loss'], j)
            writer.add_scalar('PSNR/train', logging_outputs['PSNR'], j)
            # logging.info(logging_outputs)


        if not j % args.num_valid:
            with torch.no_grad():
                trainer.eval()
                index = np.random.choice(range(len(data_seq)))
                _, logging_outputs = trainer.valid_step(data_seq[index],encoder_states, criterion, step=j)
                writer.add_scalar('val_loss/rgb_loss', logging_outputs['color_loss'], j)
                writer.add_scalar('val_loss/depth_loss', logging_outputs['depth_loss'], j)
                writer.add_scalar('val_loss/reg', logging_outputs['reg_loss'], j)
                writer.add_scalar('PSNR/test', logging_outputs['PSNR'], j)


        if not j % args.num_save:
            torch.save(trainer.state_dict(), os.path.join(args.checkpoint_path, args.exp_name, f"epoch_{str(j)}.pth.tar"))
            torch.save(trainer.state_dict(), os.path.join(args.checkpoint_path, args.exp_name, "latest.pth.tar"))
        # if not j % 100:
        #     with torch.no_grad():
        #         trainer.eval()
        #         trainer.valid_step(sample[3],encoder_states, criterion, step=j)



    