import torch
from torch import nn
from torch.utils.data import Dataset
import re, json
import numpy as np
from pathlib import Path
import os.path as osp
from PIL import Image
import random
from torchvision.transforms import ToTensor

class BlendedMVSDataset(Dataset):
    """BlendedMVS multi-view stereo dataset.

    Each item is a run of consecutive views from one sequence:
    ``(images, Es, Is)`` where ``images`` is ``(V, 3, H, W)``, ``Es`` is
    ``(V, 4, 4)`` extrinsics and ``Is`` is ``(V, 3, 3)`` intrinsics.
    """

    def __init__(self, 
        root_dir, 
        split_file,
        image_count = 5,
        randn_sample = True,

        transform = None,
    ):
        """
        Args:
            root_dir: dataset root containing one directory per sequence.
            split_file: text file listing one sequence name per line.
            image_count: views returned per item; -1 means all views.
            randn_sample: randomize window start, view order and direction.
            transform: optional callable applied to each image tensor.
        """
        root_dir = Path(root_dir)
        self.transform = transform
        self.image_count = image_count
        self.randn_sample = randn_sample

        self.to_tensor = ToTensor()

        # One sequence directory per non-blank line of the split file.
        self.sequences = []
        with open(split_file) as f:
            for line in f:
                line = line.strip()
                if line:
                    self.sequences.append(root_dir / line)

        # Parallel per-sequence path lists, sorted so that image i and
        # camera i describe the same view.
        self.images, self.cams = [], []
        for seq in self.sequences:
            image_dir = seq / "blended_images"
            cams_dir = seq / "cams"
            self.images.append(sorted(
                str(path) for path in image_dir.glob("*.jpg")
                if "mask" not in path.name
            ))
            self.cams.append(sorted(
                str(path) for path in cams_dir.glob("*_cam.txt")
            ))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image_paths = self.images[idx]
        cam_paths = self.cams[idx]

        if self.image_count == -1:
            # Use every view of the sequence.
            start_idx, end_idx = 0, len(image_paths)
        elif self.randn_sample:
            # Random window start; both bounds are clamped so that a
            # sequence shorter than image_count neither makes randint
            # raise (negative upper bound) nor indexes past the end.
            start_idx = random.randint(
                0, max(0, len(image_paths) - self.image_count))
            end_idx = min(start_idx + self.image_count, len(image_paths))
        else:
            start_idx = 0
            end_idx = min(self.image_count, len(image_paths))

        idxs = list(range(start_idx, end_idx))

        # Augmentation: randomly shuffle the view order.
        if self.randn_sample and random.random() < 0.5:
            random.shuffle(idxs)

        images, Es, Is = [], [], []
        for i in idxs:
            image = self.to_tensor(Image.open(image_paths[i]))

            cam = torch.Tensor(load_cam(cam_paths[i]).copy())
            I = cam[1, :3, :3]  # 3x3 intrinsics
            E = cam[0]          # 4x4 extrinsics

            if self.transform is not None:
                image = self.transform(image)

            # Re-center the principal point on the (possibly transformed)
            # image, since the transform may change the resolution.
            I[0, 2] = image.shape[2] / 2
            I[1, 2] = image.shape[1] / 2

            images.append(image)
            Es.append(E)
            Is.append(I)

        # Augmentation: randomly reverse the sequence direction.
        if self.randn_sample and random.random() < 0.5:
            images.reverse()
            Is.reverse()
            Es.reverse()

        return torch.stack(images), torch.stack(Es), torch.stack(Is)

def load_pfm(path):
    """Read a PFM image file into a numpy array.

    Returns an ``(H, W)`` array for grayscale ('Pf') files or
    ``(H, W, 3)`` for color ('PF') files, flipped vertically so row 0 is
    the top row (PFM stores scanlines bottom-up).

    Raises:
        ValueError: if the file is not a PFM or its header is malformed.
    """
    with open(path, 'rb') as file:
        header = file.readline().decode('UTF-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise ValueError('Not a PFM file.')

        # Dimensions line: "<width> <height>". Tolerate repeated
        # separators and trailing whitespace/CRLF, which the strict
        # pattern ^(\d+)\s(\d+)\s$ would reject.
        dim_match = re.match(r'^\s*(\d+)\s+(\d+)\s*$',
                             file.readline().decode('UTF-8'))
        if not dim_match:
            raise ValueError('Malformed PFM header.')
        width, height = map(int, dim_match.groups())

        # Scale line: the sign encodes endianness (negative = little).
        scale = float(file.readline().decode('UTF-8').rstrip())
        data_type = '<f' if scale < 0 else '>f'

        data = np.frombuffer(file.read(), data_type)
        shape = (height, width, 3) if color else (height, width)
        data = np.reshape(data, shape)
        # PFM rows run bottom-to-top; flip to conventional top-down.
        data = np.flip(data, 0)

    return data
def load_cam(path, interval_scale = 1, MAX_D = 192):
    """Read an MVSNet-style camera text file.

    Expected layout: the token "extrinsic", 16 numbers (row-major 4x4
    matrix), the token "intrinsic", 9 numbers (row-major 3x3 matrix),
    then 2-4 trailing depth values.

    Args:
        path: path to the camera ``.txt`` file.
        interval_scale: multiplier applied to the depth-interval value.
        MAX_D: default depth-plane count when the file omits it.

    Returns:
        A ``(2, 4, 4)`` float array where ``cam[0]`` is the extrinsic
        matrix, ``cam[1][:3, :3]`` the intrinsic matrix and ``cam[1][3]``
        holds ``[depth_min, depth_interval, depth_planes, depth_max]``
        (all zeros when the file carries no depth values).
    """
    with open(path, 'r') as file:
        words = file.read().split()

    cam = np.zeros((2, 4, 4))

    # Extrinsic matrix: words[1:17] (words[0] is the "extrinsic" token).
    # Convert explicitly rather than relying on numpy's implicit
    # string-to-float coercion on element assignment.
    for i in range(4):
        for j in range(4):
            cam[0][i][j] = float(words[4 * i + j + 1])

    # Intrinsic matrix: words[18:27] (words[17] is the "intrinsic" token).
    for i in range(3):
        for j in range(3):
            cam[1][i][j] = float(words[3 * i + j + 18])

    # Trailing depth values: the total word count decides which of the
    # four slots are read from the file and which are derived.
    if len(words) == 29:
        # depth_min and depth_interval given; plane count and max derived.
        cam[1][3][0] = float(words[27])
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = MAX_D
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * cam[1][3][2]
    elif len(words) == 30:
        # depth_min, depth_interval and plane count given; max derived.
        cam[1][3][0] = float(words[27])
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = float(words[29])
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * cam[1][3][2]
    elif len(words) == 31:
        # All four depth values given explicitly.
        cam[1][3][0] = float(words[27])
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = float(words[29])
        cam[1][3][3] = float(words[30])
    # Otherwise the depth row stays zero.

    return cam
