import torch
import json
from PIL import Image
import numpy as np
from volprims_linear import PinHoleCamera
import os
import tqdm


class VolumetricPrimitivesDataset:
    """In-memory dataset of alpha-mask images and their camera poses.

    Expects `data_dir` to contain a 'camera_data.json' file (a list of frame
    dicts with 'image_file', 'position', 'look_at', 'up', 'fov_y' keys) plus
    the image files it references.
    """

    def __init__(self, data_dir, image_width=1024, image_height=1024):
        """
        Initializes the dataset by setting the directory and loading the data.

        Args:
            data_dir (str): Directory with all the images and camera data.
            image_width (int): Width each image is resized to.
            image_height (int): Height each image is resized to.
        """
        self.data_dir = data_dir
        self.images = []
        # Store camera parameters as numpy arrays; they are converted to
        # torch tensors lazily in __getitem__.
        self.camera_params = []
        self.load_data(image_width, image_height)

    def load_data(self, image_width=1024, image_height=1024):
        """
        Loads images and camera data from the JSON file into memory as numpy arrays.

        Frames whose image is missing/unreadable or whose camera record is
        malformed are skipped with a warning. The image and its camera
        parameters are appended together at the end of each iteration, so
        `self.images` and `self.camera_params` always stay index-aligned.
        """
        json_path = os.path.join(self.data_dir, 'camera_data.json')

        try:
            with open(json_path, 'r') as f:
                camera_frames = json.load(f)
        except FileNotFoundError:
            print(f"Error: 'camera_data.json' not found in {self.data_dir}")
            return
        except json.JSONDecodeError:
            print(f"Error: Could not decode 'camera_data.json'. Please check its format.")
            return

        print(f"Loading images and camera poses from {self.data_dir}...")
        for frame in tqdm.tqdm(camera_frames):
            # A frame without 'image_file' cannot be loaded at all.
            if 'image_file' not in frame:
                print("Warning: Frame missing 'image_file' key, skipping.")
                continue
            image_path = os.path.join(self.data_dir, frame['image_file'])

            if not os.path.exists(image_path):
                print(f"Warning: Image file not found, skipping: {image_path}")
                continue

            try:
                # The Blender script saves a PNG where R=G=B=Alpha.
                # We load the image and extract the alpha channel as a numpy array.
                img = Image.open(image_path).convert('RGBA')
                # Resize the image to the specified dimensions
                img = img.resize((image_width, image_height), Image.Resampling.LANCZOS)
                alpha_channel = img.split()[-1]
                # Build the 4-channel (R, G, B, A) array directly: RGB fixed
                # to 1.0, alpha scaled to [0, 1]. This avoids stacking four
                # copies of the alpha plane only to overwrite three of them.
                img_np = np.ones((image_height, image_width, 4), dtype=np.float32)
                img_np[..., 3] = np.array(alpha_channel, dtype=np.float32) / 255.0

                # Parse the camera record BEFORE appending anything, so a
                # malformed frame (e.g. a missing key) is skipped as a whole
                # and cannot leave images/camera_params out of sync.
                params = {
                    "position": np.array(frame['position'], dtype=np.float32),
                    "look_at": np.array(frame['look_at'], dtype=np.float32),
                    "up": np.array(frame['up'], dtype=np.float32),
                    "fov": frame['fov_y']
                }
            except Exception as e:
                print(f"Warning: Failed to load or process {image_path}, skipping. Error: {e}")
                continue

            # Append both together so the two lists stay index-aligned.
            self.images.append(img_np)
            self.camera_params.append(params)

        print(f"Successfully loaded {len(self.images)} images and camera poses into memory as numpy arrays.")

    def __len__(self):
        """Number of successfully loaded (image, camera) pairs."""
        return len(self.images)

    def __getitem__(self, idx):
        """
        Returns the data for a given index. Return a (numpy.array, PinHoleCamera) tuple.
        """
        img_np = self.images[idx]
        params = self.camera_params[idx]

        # Create PinHoleCamera object from stored numpy parameters
        camera = PinHoleCamera(
            position=torch.from_numpy(params['position']),
            look_at=torch.from_numpy(params['look_at']),
            up=torch.from_numpy(params['up']),
            fov=params['fov']
        )

        # Return the image as a numpy array (not a tensor) and the camera object
        return img_np, camera