import os
import json
import math
import numpy as np
from PIL import Image
import cv2
import torch
from torch.utils.data import Dataset, DataLoader, IterableDataset
import torchvision.transforms.functional as TF

import pytorch_lightning as pl
from pytorch_lightning.utilities.rank_zero import _get_rank

import datasets
from models.ray_utils import get_ray_directions
class BlenderDatasetBase():
    # Shared loader mixin: reads a per-block "transforms_final_<block>.json"
    # file and prepares camera poses, ray directions and image/depth/mask
    # buffers. Concrete datasets combine this with torch Dataset classes
    # (see BlenderDataset / BlenderIterableDataset below).
    def setup(self, block, config, split):
        """Load metadata for one scene block and build per-split tensors.

        Args:
            block: block identifier interpolated into the transforms JSON
                filename under ``<root_dir>/blocks/generated/``.
            config: experiment config; fields read here: ``root_dir``,
                ``use_pixel_centers``, ``use_UE4``, ``use_mask``.
            split: 'test', 'predict', or anything else (which falls through
                to the train/val branch at the bottom).
        """
        self.config = config
        self.split = split
        self.block = block
        # Device index for .to(...) below, taken from Lightning's rank helper.
        self.rank = _get_rank()
        #camdata = read_cameras_binary(os.path.join(self.config.root_dir, 'dataset/sparse/0/cameras.bin'))

        with open(os.path.join(self.config.root_dir, f"blocks/generated/transforms_final_{self.block}.json"), 'r') as f:
            meta = json.load(f)

        # Image resolution from metadata, with a hard-coded fallback size.
        if 'w' in meta and 'h' in meta:
            W, H = int(meta['w']), int(meta['h'])
        else:
            W, H = 348, 256


        # The 'predict' metadata apparently carries no 'centroid' key; every
        # other split recenters camera x/y positions by it further down.
        if self.split != 'predict':
            self.centroid = meta['centroid']

        self.w, self.h = W, H
        self.img_wh = (self.w, self.h)
        # NOTE(review): the pi/360 factor implies camera_angle_x is expected
        # in degrees (focal = 0.5*W / tan(fov_x/2)); Blender-style transforms
        # files usually store radians — confirm the units in the JSON.
        self.focal = 0.5 * W / math.tan(meta['camera_angle_x'] * np.pi / 360.0) # scaled focal length


        # ray directions for all pixels, same for all images (same H, W, focal)
        self.directions = \
            get_ray_directions(self.w, self.h, self.focal, self.focal, self.w//2, self.h//2, self.config.use_pixel_centers, self.config.use_UE4).to(self.rank) # (h, w, 3)    


        # NOTE(review): mask_dir is computed but never used in this method.
        mask_dir = os.path.join(self.config.root_dir, 'masks')
        self.use_mask =self.config.use_mask
        
        self.all_img_idxs, self.all_c2w, self.all_images, self.all_fg_masks, self.all_depths, self.all_sky_masks = [], [], [], [], [], []
        if self.split == 'test':
            # Test split: real camera poses from the JSON; image/depth/mask
            # buffers are allocated as zeros (pixel data is not loaded here).
            for i, frame in enumerate(meta['frames']):
                #c2w=np.array([[-2.9510787e-01 ,1.9534953e-02 ,-9.5526421e-01 ,4.2893934e+00],
                #            [ 4.0453016e-03 ,-9.9975646e-01 ,-2.1694517e-02 ,4.3602742e-02],
                #            [-9.5545536e-01 ,-1.0266555e-02  ,2.9495698e-01 ,-3.6810935e+00]
                #            ])
                img_index = torch.tensor(frame['index'], dtype=torch.int32)
                self.all_img_idxs.append(img_index)
                # Keep only the 3x4 [R|t] part of the 4x4 transform.
                c2w = torch.from_numpy(np.array(frame['transform_matrix'])[:3, :4])
                # Recenter x/y translation around the block centroid
                # (z is left untouched).
                c2w[0,3] -= self.centroid[0]
                c2w[1,3] -= self.centroid[1]
                self.all_c2w.append(c2w)
                #all_c2w=torch.tensor(all_c2w)
            self.all_img_idxs = torch.stack(self.all_img_idxs, dim=0)
            self.all_c2w = torch.stack(self.all_c2w, dim=0)
            len_dataset = self.all_c2w.shape[0]
            self.all_images = torch.zeros((len_dataset, self.h, self.w, 3), dtype=torch.float32)
            self.all_depths = torch.zeros((len_dataset, self.h, self.w, 1), dtype=torch.float32)
            self.all_fg_masks = torch.zeros((len_dataset, self.h, self.w), dtype=torch.float32)
        elif self.split == 'predict':
            # NOTE(review): self.all_c2w is re-created and re-stacked INSIDE
            # the loop, so after the loop it holds only the LAST frame's pose
            # (shape (1, 3, 4)) while the buffers below are sized for 360
            # frames. Together with the commented-out create_spheric_poses
            # call, this path looks unfinished — confirm intended behavior.
            for i, frame in enumerate(meta['frames']):
                self.all_c2w=[]
                #c2w=np.array([[-2.9510787e-01 ,1.9534953e-02 ,-9.5526421e-01 ,4.2893934e+00],
                #            [ 4.0453016e-03 ,-9.9975646e-01 ,-2.1694517e-02 ,4.3602742e-02],
                #            [-9.5545536e-01 ,-1.0266555e-02  ,2.9495698e-01 ,-3.6810935e+00]
                #            ])
                c2w = torch.from_numpy(np.array(frame['transform_matrix'])[:3, :4])
                self.all_c2w.append(c2w)
                #all_c2w=torch.tensor(all_c2w)
                self.all_c2w = torch.stack(self.all_c2w, dim=0)

                #self.all_c2w = create_spheric_poses(self.all_c2w[:,:,3], n_steps=self.config.n_test_traj_steps)
                self.all_images = torch.zeros((360, self.h, self.w, 3), dtype=torch.float32)
                self.all_depths = torch.zeros((360, self.h, self.w, 1), dtype=torch.float32)
                self.all_fg_masks = torch.zeros((360, self.h, self.w), dtype=torch.float32)            
        else:
            # NOTE(review): train/val falls through to here, but the lists
            # initialized above are still empty and torch.stack([]) raises
            # RuntimeError — this branch cannot currently succeed. Frame
            # loading for train/val appears to be missing, and self.all_c2w
            # is never stacked, so the .float() call below would also fail
            # on a plain list. Confirm whether this split is ever used.
            self.all_images, self.all_fg_masks, self.all_depths = torch.stack(self.all_images, dim=0), torch.stack(self.all_fg_masks, dim=0), torch.stack(self.all_depths, dim=0)

        """
        # for debug use
        from models.ray_utils import get_rays
        rays_o, rays_d = get_rays(self.directions.cpu(), self.all_c2w, keepdim=True)
        pts_out = []
        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 1.0 0.0 0.0' for l in rays_o[:,0,0].reshape(-1, 3).tolist()]))

        t_vals = torch.linspace(0, 1, 8)
        z_vals = 0.05 * (1 - t_vals) + 0.5 * t_vals

        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,0,0][..., None, :])
        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 1.0 0.0' for l in ray_pts.view(-1, 3).tolist()]))

        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,self.h-1,0][..., None, :])
        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 0.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))

        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,0,self.w-1][..., None, :])
        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 1.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))

        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,self.h-1,self.w-1][..., None, :])
        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 1.0 1.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))
        
        open('cameras.txt', 'w').write('\n'.join(pts_out))
        open('scene.txt', 'w').write('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 0.0 0.0' for l in pts3d.view(-1, 3).tolist()]))

        exit(1)
        """

        # Move all per-frame tensors to the target device as float32.
        self.all_c2w, self.all_fg_masks,self.all_depths, self.all_images = \
            self.all_c2w.float().to(self.rank), \
            self.all_fg_masks.float().to(self.rank), \
            self.all_depths.float().to(self.rank), \
            self.all_images.float().to(self.rank), \
            
        

class BlenderDataset(Dataset, BlenderDatasetBase):
    """Map-style dataset over the tensors prepared by BlenderDatasetBase.setup."""

    def __init__(self, block, config, split):
        # All heavy lifting (JSON parsing, buffer allocation, device
        # transfer) happens in the shared setup() of the base mixin.
        self.setup(block, config, split)

    def __len__(self):
        # One item per loaded image buffer.
        return len(self.all_images)

    def __getitem__(self, index):
        # Only the index is handed out; actual ray/pixel data is resolved
        # downstream from the tensors held on this object.
        return {'index': index}


class BlenderIterableDataset(IterableDataset, BlenderDatasetBase):
    """Infinite iterable dataset for training.

    Yields empty dicts forever; actual sampling is expected to happen
    downstream from the tensors prepared by BlenderDatasetBase.setup.
    """

    def __init__(self, config, split, block=None):
        # BUG FIX: BlenderDatasetBase.setup requires (block, config, split);
        # the original call self.setup(config, split) raised TypeError on
        # every instantiation. `block` is added as a trailing parameter with
        # a default so existing BlenderIterableDataset(config, split) call
        # sites keep working, and is forwarded like BlenderDataset does.
        self.setup(block, config, split)

    def __iter__(self):
        # Infinite stream; the training loop decides when to stop.
        while True:
            yield {}


@datasets.register('blender-blocks-final-test')
class BlenderDataModule(pl.LightningDataModule):
    """LightningDataModule wiring the block datasets to Lightning stages."""

    def __init__(self, config, block):
        super().__init__()
        self.config = config
        self.block = block

    def setup(self, stage=None):
        """Instantiate the dataset(s) required for the given Lightning stage."""
        if stage in [None, 'fit']:
            # NOTE(review): BlenderIterableDataset.__init__ does not accept
            # a block argument in its current form — confirm training path.
            self.train_dataset = BlenderIterableDataset(self.config, 'train')
        if stage in [None, 'fit', 'validate']:
            # BUG FIX: BlenderDataset.__init__ takes (block, config, split)
            # — see the 'test' branch below — but the original call omitted
            # `block`, raising TypeError. Pass self.block first.
            self.val_dataset = BlenderDataset(self.block, self.config, 'val')
        if stage in [None, 'test']:
            self.test_dataset = BlenderDataset(self.block, self.config, 'test')
        if stage in [None, 'predict']:
            # BUG FIX: same missing `block` argument as the 'val' case.
            # NOTE(review): the 'train' split here is kept from the original
            # code; confirm 'predict' was not intended instead.
            self.predict_dataset = BlenderDataset(self.block, self.config, 'train')

    def prepare_data(self):
        # Nothing to download or pre-process.
        pass

    def general_loader(self, dataset, batch_size):
        """Build a DataLoader with the settings shared by all stages."""
        sampler = None
        return DataLoader(
            dataset,
            num_workers=os.cpu_count(),
            batch_size=batch_size,
            pin_memory=True,
            sampler=sampler
        )

    def train_dataloader(self):
        return self.general_loader(self.train_dataset, batch_size=1)

    def val_dataloader(self):
        return self.general_loader(self.val_dataset, batch_size=1)

    def test_dataloader(self):
        return self.general_loader(self.test_dataset, batch_size=1)

    def predict_dataloader(self):
        return self.general_loader(self.predict_dataset, batch_size=1)
