from pathlib import Path
import copy
from dataclasses import dataclass
import json
from typing import Dict, Sequence
import random
from PIL import Image

import torch
from torch.utils.data import Dataset
import transformers

from .vis_utils import process_images
from .txt_utils import preprocess_conv, preprocess_mm
from dattn.constants import IGNORE_INDEX


class ImageConvDataset(Dataset):
    """Dataset of (optionally image-grounded) conversations loaded from JSON.

    Each record in ``data_args.data_path`` is a dict with a ``conversations``
    list and, for multimodal samples, an ``image`` filename relative to
    ``data_args.image_folder``.
    """

    def __init__(self, data_args, processor, tokenizer):
        super().__init__()
        self.data_args = data_args
        # Use a context manager so the JSON file handle is closed promptly
        # (the previous `json.load(open(...))` leaked the handle).
        with open(data_args.data_path, "r") as f:
            self.list_data_dict = json.load(f)
        self.processor = processor
        self.tokenizer = tokenizer

    def __len__(self) -> int:
        return len(self.list_data_dict)

    @property
    def lengths(self):
        """Approximate length per sample, for length-grouped samplers.

        Text length is approximated by whitespace-split word count; samples
        with an image add a flat budget of 512 image tokens.
        """
        length_list = []
        for sample in self.list_data_dict:
            img_tokens = 512 if "image" in sample else 0
            length_list.append(
                sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens
            )
        return length_list

    @property
    def modality_lengths(self):
        """Signed text length per sample: positive if the sample has an
        image, negative if it is text-only (modality-grouped batching
        convention)."""
        length_list = []
        for sample in self.list_data_dict:
            cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
            length_list.append(cur_len if "image" in sample else -cur_len)
        return length_list

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Load and preprocess sample ``i``.

        On any failure (e.g. a corrupt/missing image) a random replacement
        index is drawn, up to 5 attempts; after that an ``IOError`` is
        raised chained to the last underlying exception.
        """
        num_try, max_try = 0, 5
        last_err = None
        while True:
            try:
                data = copy.deepcopy(self.list_data_dict[i])
                if "image" in data:
                    has_image = True
                    image_file = Path(self.data_args.image_folder) / data['image']
                    image = Image.open(image_file).convert('RGB')
                    image_size = image.size
                    image = process_images([image, ], self.processor, self.data_args)[0]
                    data = preprocess_mm(data["conversations"], self.data_args)
                else:
                    has_image = False
                    data = data["conversations"]
                break
            except Exception as e:
                print(repr(e))
                last_err = e
                num_try += 1
                if num_try == max_try:
                    # Chain the last failure so the root cause stays visible
                    # in the traceback (the old bare raise discarded it).
                    raise IOError(
                        f"Error reading data after {max_try} attempts."
                    ) from last_err
                # Best-effort fallback: retry with a random sample instead of
                # failing the whole batch on one bad record.
                i = random.randint(0, len(self.list_data_dict) - 1)

        data_dict = preprocess_conv(data, self.tokenizer, has_image=has_image)
        if has_image:
            data_dict['image'] = image
            data_dict['image_size'] = image_size
        else:
            # Text-only samples get an all-zero dummy image so every batch
            # element carries an 'image' entry for the collator.
            height = width = self.processor.output_size
            if self.data_args.mm_image_aspect_ratio == "anyres":
                # NOTE(review): anyres appears to expect a leading patch
                # dimension and a doubled reported height — confirm against
                # the downstream image-encoding code.
                data_dict['image'] = torch.zeros(3, 3, height, width)
                data_dict['image_size'] = (height * 2, width)
            else:
                data_dict['image'] = torch.zeros(3, height, width)
                data_dict['image_size'] = (height, width)

        return data_dict


@dataclass
class ImageConvCollator(object):
    """Collates per-sample dicts into a padded training batch.

    ``input_ids`` are padded with the tokenizer's pad token and ``labels``
    with ``IGNORE_INDEX``; both are truncated to ``model_max_length``.
    Images are stacked into one tensor only when every sample shares the
    same shape, otherwise they are passed through as a list.
    """

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        pad_id = self.tokenizer.pad_token_id
        max_len = self.tokenizer.model_max_length

        def _pad_and_trim(key, fill_value):
            # Right-pad the variable-length sequences, then cap the width.
            seqs = [inst[key] for inst in instances]
            padded = torch.nn.utils.rnn.pad_sequence(
                seqs, batch_first=True, padding_value=fill_value
            )
            return padded[:, :max_len]

        input_ids = _pad_and_trim("input_ids", pad_id)
        labels = _pad_and_trim("labels", IGNORE_INDEX)

        images = [inst['image'] for inst in instances]
        if all(img.shape == images[0].shape for img in images):
            images = torch.stack(images)

        return dict(
            input_ids=input_ids,
            labels=labels,
            images=images,
            image_sizes=[inst['image_size'] for inst in instances],
            # NOTE: real tokens that happen to equal the pad id would be
            # masked out too — standard assumption for this padding scheme.
            attention_mask=input_ids.ne(pad_id),
        )
