import os
import datasets
from datasets import load_dataset, ClassLabel, concatenate_datasets
import torch
import numpy as np
import random
from PIL import Image
import json
import copy
# import torchvision.transforms as T
from torchvision import transforms
import pickle 
import re

from OmniGen import OmniGenProcessor
from OmniGen.processor import OmniGenCollator


class DatasetFromJson(torch.utils.data.Dataset):
    """Training dataset for OmniGen built from a JSON annotation file or a
    plain-text file list.

    Two modes, selected by the extension of ``json_file``:

    * ``.txt``  — each line is an image file name; the task is pattern
      completion of that image (``is_txt = True``).
    * otherwise — a JSON file loadable by ``datasets.load_dataset`` whose
      records carry ``instruction``, ``input_images`` and ``output_image``
      fields (``is_txt = False``).

    Each item is a 4-tuple ``(mllm_input, output_image, instruction, file_ref)``
    where ``file_ref`` is the raw file name the output image was loaded from.
    """

    def __init__(
        self,
        json_file: str,
        image_path: str,
        processer: OmniGenProcessor,
        input_transform,
        output_transform,
        max_input_length_limit: int = 18000,
        condition_dropout_prob: float = 0.1,
        keep_raw_resolution: bool = True,
    ):
        """
        Args:
            json_file: path to the ``.txt`` file list or JSON annotation file.
            image_path: root directory that image file names are resolved
                against (previously ignored in favour of hard-coded paths).
            processer: OmniGen processor used to build the multi-modal prompt.
            input_transform: transform applied to conditioning (input) images.
            output_transform: transform applied to target (output) images.
            max_input_length_limit: upper bound on tokenized prompt length.
            condition_dropout_prob: probability of dropping the condition
                (classifier-free-guidance training).
            keep_raw_resolution: whether images keep their native resolution.
        """
        self.input_transform = input_transform
        self.output_transform = output_transform
        self.processer = processer
        self.condition_dropout_prob = condition_dropout_prob
        self.max_input_length_limit = max_input_length_limit
        self.keep_raw_resolution = keep_raw_resolution

        if json_file.split(".")[-1] == "txt":
            # Plain file list: one image file name per line.
            self.data = []
            with open(json_file, 'r', encoding='utf-8') as file:
                for line in file:
                    self.data.append(line.strip())
            self.is_txt = True
        else:
            self.data = load_dataset('json', data_files=json_file)['train']
            self.is_txt = False

        # Root directory for resolving relative image file names.
        # Bug fix: this was stored but never used — every load site used a
        # hard-coded machine-specific path instead.
        self.image_path = image_path

    def process_input(self, root_path, image_file):
        """Load ``root_path/image_file`` as RGB and apply the input transform."""
        image_file = os.path.join(root_path, image_file)
        image = Image.open(image_file).convert('RGB')
        return self.input_transform(image)

    def process_output(self, root_path, image_file):
        """Load ``root_path/image_file`` as RGB and apply the output transform."""
        image_file = os.path.join(root_path, image_file)
        image = Image.open(image_file).convert('RGB')
        return self.output_transform(image)

    def get_example(self, index):
        """Build one training example from a JSON record."""
        example = self.data[index]
        instruction, input_images, output_image = example['instruction'], example['input_images'], example['output_image']
        # Keep the raw file name so items have the same 4-tuple shape as
        # txt_get_example (the collator reads f[3]).
        output_image_file = output_image
        # Classifier-free-guidance dropout: replace the condition with the
        # unconditional token and drop the conditioning images.
        if random.random() < self.condition_dropout_prob:
            instruction = '<cfg>'
            input_images = None
        if input_images is not None:
            # NOTE(review): the original code applied the *output* transform to
            # the conditioning images here — confirm whether process_input was
            # intended; behaviour preserved.
            input_images = [self.process_output(self.image_path, x) for x in input_images]
        mllm_input = self.processer.process_multi_modal_prompt(instruction, input_images)
        output_image = self.process_output(self.image_path, output_image_file)
        return (mllm_input, output_image, instruction, output_image_file)

    def txt_get_example(self, index):
        """Build one pattern-completion example from a file-list entry."""
        example = self.data[index]
        instruction = "Perform pattern completion on <img><|image_1|></img>."
        input_images = output_image = example
        # Classifier-free-guidance dropout, as in get_example.
        if random.random() < self.condition_dropout_prob:
            instruction = '<cfg>'
            input_images = None
        if input_images is not None:
            input_images = [self.process_input(self.image_path, input_images)]
        mllm_input = self.processer.process_multi_modal_prompt(instruction, input_images)
        output_image = self.process_output(self.image_path, output_image)
        return (mllm_input, output_image, instruction, example)

    def __getitem__(self, index):
        if self.is_txt:
            return self.txt_get_example(index)
        else:
            return self.get_example(index)

    def __len__(self):
        return len(self.data)



class TrainDataCollator(OmniGenCollator):
    """Collate DatasetFromJson items into a model-ready training batch.

    Expects each feature to be ``(mllm_input, output_image, instruction)`` or
    ``(mllm_input, output_image, instruction, file_ref)``.
    """

    def __init__(self, pad_token_id: int, hidden_size: int, keep_raw_resolution: bool):
        # Token id used to left-pad prompts; hidden size of the model, used by
        # the inherited process_mllm_input; whether images keep native size.
        self.pad_token_id = pad_token_id
        self.hidden_size = hidden_size
        self.keep_raw_resolution = keep_raw_resolution

    def __call__(self, features):
        mllm_inputs = [f[0] for f in features]
        # mllm_input:[{
        #   "input_ids": [xxx0000xx00000xxxx],
        #   "pixel_values": [tensor(img1), tensor(img2)],
        #   "image_sizes": [ [3,7], [9, 14] ]
        # },{},{},{}]
        output_images = [f[1].unsqueeze(0) for f in features]
        target_img_size = [[x.size(-2), x.size(-1)] for x in output_images]
        instruction = [f[2] for f in features]
        # Bug fix: DatasetFromJson.get_example yields 3-tuples, so indexing
        # f[3] unconditionally raised IndexError for JSON datasets; fall back
        # to None when no file reference is present.
        file_path = [f[3] if len(f) > 3 else None for f in features]

        all_padded_input_ids, all_position_ids, all_attention_mask, all_padding_images, all_pixel_values, all_image_sizes = self.process_mllm_input(mllm_inputs, target_img_size)

        if not self.keep_raw_resolution:
            # Fixed resolution: stack targets (and inputs, if any) into one
            # batch tensor instead of keeping per-sample lists.
            output_images = torch.cat(output_images, dim=0)
            if len(all_pixel_values) > 0:
                all_pixel_values = torch.cat(all_pixel_values, dim=0)
            else:
                all_pixel_values = None

        data = {"input_ids": all_padded_input_ids,  # tensor(n*(p*pad_id+txt))
            "attention_mask": all_attention_mask,   # ?
            "position_ids": all_position_ids,       # tensor(n*(p*0 + range(txt+oimg+1)))
            "input_pixel_values": all_pixel_values, # [tensor(c,h,w), tensor(c,h,w), tensor(c,h,w), tensor(c,h,w), tensor(c,h,w)]
            "input_image_sizes": all_image_sizes,   # [0:[[p+3,p+7],[p+9,p+14]], 2:[[]], 3:[[]]]
            "padding_images": all_padding_images,   # [tensor(1, pad_oimg, 3072), xxx, xxx]
            "output_images": output_images,         # [tensor(c,h,w), tensor(c,h,w), tensor(c,h,w)]
            "instruction": instruction,             # [txt1,txt2,txt3]
            "file_path": file_path,
        }
        return data





