import os
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as T
from torchvision.datasets import VisionDataset
import json
import random
from torchvision.transforms import functional as TF

class RandoRotate:
    """Rotate both images of a pair by the same random angle.

    A single integer angle is drawn uniformly from [-angle, angle] so the
    UV map and its target image stay geometrically aligned.
    """

    def __init__(self, angle):
        self.angle = angle

    def __call__(self, data):
        first, second = data
        chosen = random.randint(-self.angle, self.angle)
        return TF.rotate(first, chosen), TF.rotate(second, chosen)

class RandomVericalFlip:
    """Vertically flip both images of a pair together with probability ``p``."""

    def __init__(self, p=0.2):
        self.p = p

    def __call__(self, data):
        first, second = data
        # One draw decides the fate of the whole pair, keeping them aligned.
        if random.random() >= self.p:
            return first, second
        return TF.vflip(first), TF.vflip(second)

class RandomHorizontalFlip:
    """Horizontally flip both images of a pair together with probability ``p``."""

    def __init__(self, p=0.2):
        self.p = p

    def __call__(self, data):
        first, second = data
        # Single random draw so both images flip (or not) in lockstep.
        if random.random() >= self.p:
            return first, second
        return TF.hflip(first), TF.hflip(second)

class RandomCropToSize:
    """Crop both images of a pair to ``size`` with probability ``p``.

    The same crop window is applied to both images so they stay aligned.

    Args:
        size: (crop_width, crop_height) of the output window.
        p: probability of applying the crop; otherwise the pair is
            returned unchanged.
    """

    def __init__(self, size, p=0.1):
        self.size = size
        self.p = p

    def __call__(self, data):
        image1, image2 = data
        if random.random() < self.p:
            width, height = image1.size
            crop_width, crop_height = self.size
            # Guard: a crop larger than the image is impossible — skip it
            # instead of letting randint raise ValueError on a negative
            # upper bound (the original crashed here).
            if width >= crop_width and height >= crop_height:
                # Top-left corner of the crop window, shared by both images.
                x = random.randint(0, width - crop_width)
                y = random.randint(0, height - crop_height)
                image1 = TF.crop(image1, y, x, crop_height, crop_width)
                image2 = TF.crop(image2, y, x, crop_height, crop_width)
        return image1, image2

class ToTensor(object):
    """Convert a (UV, image) PIL pair to tensors rescaled to [-1, 1].

    The third channel of the UV map is zeroed before the affine rescale
    (so it lands at -1 afterwards); the target image is rescaled as-is.
    """

    def __call__(self, data):
        uv_map, target = data
        uv_tensor = T.ToTensor()(uv_map)
        # Drop channel 2 of the UV map; only the first two channels carry
        # the UV coordinates.
        uv_tensor[2, :, :] = 0
        target_tensor = T.ToTensor()(target)
        # Map [0, 1] -> [-1, 1] for both outputs.
        return 2.0 * uv_tensor - 1.0, 2.0 * target_tensor - 1.0


class uvDataset(VisionDataset):
    """Dataset of (UV map, rendered RGB) pairs driven by a transforms JSON.

    Each item is a dict containing the stacked UV maps (``imgs``), the
    target RGB renderings (``tgts``), the source image file names, the
    per-frame camera extrinsics, and a boolean per-camera keep mask used
    for random camera dropout during training.

    Args:
        root: dataset directory containing ``transforms_<split>.json``.
        num_cam: number of cameras per sample (stack size / dropout range).
        split: which ``transforms_*.json`` to load ("train", "test", ...).
        dropout: probability of dropping a random subset of cameras per item.
        is_raytraced: if True, targets come from ``rgb_path``; otherwise
            from ``rgb_rasterize_path``.
        is_stack: if True, a single sample holds the first ``num_cam``
            frames; otherwise every frame becomes its own one-view sample.
        sel_ind: optional explicit frame indices to keep; overrides
            ``train_ratio`` when non-empty.
        train_ratio: fraction of frames kept when ``sel_ind`` is empty.
        resize: side length in pixels images and UV maps are resized to.
    """

    def __init__(self, root, num_cam, split='train', dropout=0.0, is_raytraced=True,
                 is_stack=False, sel_ind=None, train_ratio=1.0, resize=800):
        super().__init__(root)
        self.dataroot = root
        self.num_cam = num_cam
        # `None` default instead of the original mutable `[]` default
        # argument (shared across calls); behavior is unchanged.
        self.sel_ind = [] if sel_ind is None else sel_ind
        self.is_raytraced = is_raytraced
        self.split = split
        self.dropout = dropout
        self.resize = resize
        self.json_path = os.path.join(self.dataroot, "transforms_%s.json" % (split))
        with open(self.json_path, "r") as f:
            self.json_content = json.load(f)
        self.camera_angle_x = self.json_content["camera_angle_x"]
        self.frames = self.json_content["frames"]
        if is_stack:
            # One sample = the first num_cam frames stacked together.
            self.frames = [self.frames[:self.num_cam]]
        else:
            # One sample per frame, optionally subsampled.
            self.frames = [[frame] for frame in self.frames]
            if len(self.sel_ind) == 0:
                self.frames = self.frames[:int(len(self.frames) * train_ratio)]
            else:
                print("sel ind length:", len(self.sel_ind))
                print('sel ind:', self.sel_ind)
                self.frames = [self.frames[i] for i in self.sel_ind]
        self.camera_intrinsics = self.json_content["camera_intrinsics"]

        # The original stacked / unstacked train branches built identical
        # pipelines, so they are merged here. Augmentations apply the same
        # random transform to the UV map and its target.
        if split == "train":
            self.transform = T.Compose([
                RandomVericalFlip(),
                RandomHorizontalFlip(),
                RandoRotate(10),
                # RandomCropToSize((512, 512)),
                ToTensor(),
            ])
        else:
            self.transform = T.Compose([
                ToTensor(),
            ])

        print(f'{split}: {len(self.frames)} views')

    def __len__(self):
        return len(self.frames)

    def __getitem__(self, idx, visualize=False):
        """Load one sample.

        Args:
            idx: sample index into ``self.frames``.
            visualize: unused; kept for caller compatibility (the __main__
                smoke test passes it explicitly).

        Returns:
            dict with keys ``imgs``, ``tgts``, ``imgs_name``, ``extrinsics``,
            ``keep_cams``.
        """
        frame_list = self.frames[idx]
        imgs, tgts, imgs_name, extrinsics = [], [], [], []
        for frame in frame_list:
            imgs_name.append(os.path.basename(frame["rgb_path"]))
            extrinsics.append(np.array(frame["transform_matrix"], dtype=np.float32))
            UV = Image.open(os.path.join(self.dataroot, frame["uv_path"])).convert('RGB').resize((self.resize, self.resize))

            # Pick the target rendering and run the paired transform so the
            # UV map and target receive identical random augmentations.
            tgt_key = "rgb_path" if self.is_raytraced else "rgb_rasterize_path"
            tgt = Image.open(os.path.join(self.dataroot, frame[tgt_key])).resize((self.resize, self.resize))
            UV, tgt = self.transform((UV, tgt))
            tgts.append(tgt)
            imgs.append(UV)

        imgs = torch.stack(imgs)
        tgts = torch.stack(tgts)
        extrinsics = np.array(extrinsics)

        # Random camera dropout: with probability `dropout`, mask out a
        # uniformly-chosen subset (possibly empty) of the cameras.
        keep_cams = torch.ones(self.num_cam, dtype=torch.bool)
        # rand() is drawn unconditionally to preserve the RNG stream; with a
        # single camera there is nothing to drop (the original raised in
        # np.random.randint(0) here).
        if np.random.rand() < self.dropout and self.num_cam > 1:
            num_drop = np.random.randint(self.num_cam - 1)
            drop_cams = np.random.choice(self.num_cam, num_drop, replace=False)
            for cam in drop_cams:
                keep_cams[cam] = 0

        batch_data = {
            "imgs": imgs,
            "tgts": tgts,
            "imgs_name": imgs_name,
            "extrinsics": extrinsics,
            "keep_cams": keep_cams
        }
        return batch_data


if __name__ == '__main__':
    # Smoke test. The class defined in this file is `uvDataset`; the
    # original called an undefined `imgDataset`, which always raised
    # NameError.
    dataset = uvDataset('/home/houyz/Data/modelnet/modelnet40_images_new_12x', 12)
    dataset.__getitem__(0)
    dataset.__getitem__(len(dataset) - 1, visualize=True)
