import torch
from torch.utils.data import Dataset
import os, random, math
import os.path as osp
import json

from torchvision.transforms import ToTensor

from PIL import Image
import numpy as np


class DL3DVDataset(Dataset):
    """Multi-view dataset over DL3DV scenes.

    Each item is one scene, returned as a tuple ``(images, Es, Is)``:
    ``images`` is a ``(V, C, H, W)`` tensor of sampled frames, ``Es`` the
    ``(V, 4, 4)`` camera extrinsics read from ``transforms.json``, and
    ``Is`` the ``(V, 3, 3)`` intrinsics rescaled to the loaded image
    resolution (identical for every view of a scene).
    """

    def __init__(self,
        root_dir,
        split_file = "data/DL3DV/train.txt",
        resolution = "480p",
        sample_interval = 20,
        image_count = 5,
        randn_sample = False,
        transform = None
    ):
        """Index the scenes listed in ``split_file``.

        Args:
            root_dir: dataset root; each line of ``split_file`` names a
                scene directory relative to it.
            split_file: text file with one scene sub-directory per line.
            resolution: "480p" (uses ``images_8``) or "960p" (``images_4``).
            sample_interval: nominal frame stride between sampled views.
            image_count: views per item; ``-1`` takes every
                ``sample_interval``-th frame of the scene instead.
            randn_sample: if True, jitter the stride and per-view offsets
                and shuffle the view order half the time.
            transform: optional callable applied to each image tensor.

        Raises:
            ValueError: if ``resolution`` is not a supported value.
        """
        super().__init__()

        self.root_dir = root_dir
        self.image_count = image_count
        self.randn_sample = randn_sample
        self.sample_interval = sample_interval
        self.transform = transform

        self.to_tensor = ToTensor()

        with open(split_file) as f:
            # Skip blank lines so a trailing newline does not create a
            # bogus scene entry pointing at root_dir itself.
            self.scenes = [
                osp.join(root_dir, line.strip()) for line in f if line.strip()
            ]

        print(f"load {len(self.scenes)} scenes from {osp.basename(split_file)} in DL3DV")

        # Benchmark releases nest their data under a "nerfstudio" folder.
        self.folder = "nerfstudio" if "Benchmark" in root_dir else ""

        if resolution == "480p":
            self.image_res_folder = osp.join(self.folder, "images_8")
        elif resolution == "960p":
            self.image_res_folder = osp.join(self.folder, "images_4")
        else:
            raise ValueError(f"unknown resolution {resolution}")

    def __len__(self):
        """One dataset item per scene."""
        return len(self.scenes)

    def _sample_indexs(self, n_views):
        """Choose the frame indices for one item.

        Always returns indices in ``[0, n_views)``; scenes shorter than
        ``image_count * stride`` fall back to evenly spaced sampling.
        """
        if self.image_count == -1:
            # Take the whole scene at the configured stride.
            return list(range(0, n_views, self.sample_interval))

        if self.randn_sample:
            # Jitter the stride; clamp to >= 1 so range() never receives a
            # zero/negative step when sample_interval is small.
            interval = max(1, self.sample_interval + random.randint(-5, 5))
            bound = interval * self.image_count

            if n_views < bound:
                # Scene too short for the jittered stride: spread evenly.
                return np.linspace(
                    0, n_views - 1, self.image_count, dtype=np.int64
                ).tolist()

            start = random.randint(0, n_views - bound)
            indexs = [
                i + random.randint(-3, 3)
                for i in range(start, start + bound, interval)
            ]
            # Per-view noise can push only the two ends out of range.
            indexs[0] = max(indexs[0], 0)
            indexs[-1] = min(indexs[-1], n_views - 1)
            return indexs

        bound = self.image_count * self.sample_interval
        if n_views < bound:
            # Previously this branch produced out-of-range indices (and an
            # IndexError) on short scenes; mirror the random branch instead.
            return np.linspace(
                0, n_views - 1, self.image_count, dtype=np.int64
            ).tolist()
        return list(range(0, bound, self.sample_interval))

    def __getitem__(self, index):
        """Load one scene and return ``(images, Es, Is)`` stacked tensors."""
        scene_dir = self.scenes[index]
        images_dir = osp.join(scene_dir, self.image_res_folder)
        transform_file = osp.join(scene_dir, self.folder, "transforms.json")

        with open(transform_file) as f:
            transforms = json.load(f)

        views = []
        for frame in transforms["frames"]:
            # file_path in the json uses forward slashes; keep the basename.
            frame_name = frame["file_path"].split("/")[-1]
            views.append({
                "image_path": osp.join(images_dir, frame_name),
                "E": torch.tensor(frame["transform_matrix"]),
            })

        indexs = self._sample_indexs(len(views))

        # Half the time present the sampled views in random order.
        if self.randn_sample and random.random() < 0.5:
            random.shuffle(indexs)

        images, Es, Is = [], [], []
        K = None  # intrinsics are shared by all frames of a scene
        for i in indexs:
            # Context manager closes the underlying file handle; a bare
            # Image.open would leak a descriptor per frame.
            with Image.open(views[i]["image_path"]) as img:
                image = self.to_tensor(img)

            if self.transform:
                image = self.transform(image)

            H, W = image.shape[-2:]
            if K is None:
                K = self._read_intristic(transforms, W, H)

            images.append(image)
            Es.append(views[i]["E"])
            Is.append(K)

        return torch.stack(images), torch.stack(Es), torch.stack(Is)

    def _read_intristic(self, transform_json, tar_W, tar_H):
        """Build the 3x3 intrinsics matrix rescaled to (tar_W, tar_H).

        Focal lengths from ``transform_json`` (keys ``fl_x``/``fl_y``,
        original size ``w``/``h``) are scaled per axis; the principal
        point is assumed to be the image centre (any cx/cy in the json
        are deliberately ignored).
        """
        s_w = tar_W / transform_json["w"]
        s_h = tar_H / transform_json["h"]

        return torch.tensor([
            [transform_json["fl_x"] * s_w, 0, tar_W / 2],
            [0, transform_json["fl_y"] * s_h, tar_H / 2],
            [0, 0, 1],
        ])
    

