import os
import random
import cv2
import numpy as np
import torch
from accelerate.logging import get_logger
from PIL import Image
from torch.utils.data import IterableDataset
from torchvision import transforms
import json

logger = get_logger(__name__)

def get_min_bounding_box(mask, pp=5):
    """Return a mask-sized array with the padded bounding box of `mask`'s
    non-zero region filled with 255 (0 elsewhere).

    Args:
        mask: 2-D numpy array; any non-zero pixel counts as foreground.
        pp: padding, in pixels, added on every side of the tight box
            (clamped to the image bounds).

    Returns:
        Array of the same shape/dtype as `mask`: 255 inside the padded box,
        0 outside. If `mask` has no non-zero pixels, `mask` itself is
        returned unchanged.
    """
    H = np.shape(mask)[0]
    W = np.shape(mask)[1]
    nonzero_indices = np.nonzero(mask)
    # Bug fix: np.nonzero returns a tuple of length mask.ndim, so the old
    # `len(nonzero_indices) == 0` test could never fire and an all-zero mask
    # crashed np.min with "zero-size array". Check the index array instead.
    if nonzero_indices[0].size == 0:
        return mask
    min_row = max(np.min(nonzero_indices[0]) - pp, 0)
    max_row = min(np.max(nonzero_indices[0]) + pp, H)
    min_col = max(np.min(nonzero_indices[1]) - pp, 0)
    max_col = min(np.max(nonzero_indices[1]) + pp, W)
    bounding_box = np.zeros_like(mask)
    bounding_box[min_row : max_row + 1, min_col : max_col + 1] = 255
    return bounding_box

def random_warponly(img, sigma=15, patch=40):
    """Randomly warp a binary mask with a smooth random displacement field.

    A coarse Gaussian displacement grid (roughly one cell per `patch`
    pixels, std `sigma`) is upsampled to full resolution and used to remap
    the mask. The warped result is unioned with the original (so the warp
    only grows the region) and re-binarized to {0, 255}.

    Args:
        img: 2-D mask; values in [0, 255] are normalized to [0, 1] first
            (detected via max > 128).
        sigma: standard deviation of the random displacements, in pixels.
        patch: approximate size in pixels of one displacement-grid cell.

    Returns:
        Float array of the same shape as `img` with values in {0.0, 255.0}.
    """
    if np.max(img) > 128:
        img = img / 255
    h, w = img.shape[:2]
    # Bug fix: for masks smaller than `patch`, int(w / patch) was 0 and
    # cv2.resize crashed on the empty grid — clamp the grid to >= 1 cell.
    grid_w = max(int(w / patch), 1)
    grid_h = max(int(h / patch), 1)
    dx = np.random.normal(0, sigma, (grid_w, grid_h))
    dy = np.random.normal(0, sigma, (grid_w, grid_h))
    dx = cv2.resize(dx, dsize=(w, h), interpolation=cv2.INTER_CUBIC)
    dy = cv2.resize(dy, dsize=(w, h), interpolation=cv2.INTER_CUBIC)
    x, y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = (x + dx).astype(np.float32)
    map_y = (y + dy).astype(np.float32)
    warped = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)
    # Union with the original mask, then threshold back to a hard binary.
    warped += img
    warped[warped > 0.5] = 1.0
    warped[warped <= 0.5] = 0.0
    warped = warped * 255.0
    return warped

class RandomCrop(object):
    """Crop an image and an optional target at the same random location.

    Unlike torchvision's RandomCrop, this applies one set of crop
    parameters to both inputs so they stay spatially aligned.
    """

    def __init__(self, size):
        # Side length of the square output crop.
        self.size = size

    def __call__(self, image, target):
        params = transforms.RandomCrop.get_params(
            image, output_size=(self.size, self.size)
        )
        cropped_image = transforms.functional.crop(image, *params)
        cropped_target = target
        if target is not None:
            cropped_target = transforms.functional.crop(target, *params)
        return cropped_image, cropped_target

def augment_images(image, mask, resolution):
    """Apply identical spatial augmentations to an image and its mask.

    The mask (numpy array) is binarized and converted to a PIL "L" image;
    both inputs are then resized (shorter edge -> `resolution`), randomly
    cropped to `resolution` x `resolution`, randomly h-flipped together,
    and converted to tensors. The image is normalized with mean/std 0.5;
    the mask is forced to strict {0, 1} values.
    """
    _, binary = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)
    pil_mask = Image.fromarray(binary.astype("uint8")).convert("L")

    resizer = transforms.Resize(resolution)
    image, pil_mask = resizer(image), resizer(pil_mask)

    image, pil_mask = RandomCrop(resolution)(image, pil_mask)

    if random.random() > 0.5:
        image = transforms.functional.hflip(image)
        pil_mask = transforms.functional.hflip(pil_mask)

    to_tensor = transforms.ToTensor()
    image_t = to_tensor(image)
    mask_t = to_tensor(pil_mask)
    mask_t[mask_t != 0] = 1
    image_t = transforms.Normalize(mean=[0.5], std=[0.5])(image_t)
    return image_t, mask_t

class Petro_Dataset(IterableDataset):
    """Iterable dataset for mask-conditioned inpainting training.

    Reads a JSON-lines annotation file where each record contains an image
    path ("imgpath"), a list of mask paths ("maskpath") and matching English
    captions ("text_en"). Each yielded sample is a dict with the augmented
    image tensor, a (possibly warped) binary mask, a tradeoff vector and
    tokenized prompts. Records that fail any filter are skipped; records
    that raise are logged and skipped.
    """

    def __init__(
        self,
        transforms,
        pipeline,
        task_prompt,
        desc_prefix=False,
        name=None,
        json_root=None,  # path to the JSON-lines annotation file
        bufsize=None,
        clip_score_threshold=None,
        aesthetic_score_threshold=0.5,
        resolution=None,
        deterministic=False,
        use_petreloss=False,
        **kwargs,
    ):
        """Load and shuffle the annotation list; store the configuration.

        Args:
            transforms: stored but currently unused by the sampling code.
            pipeline: object exposing a `.tokenizer` used to encode prompts.
            task_prompt: provides `object_inpainting.placeholder_tokens`.
            desc_prefix: if True, prepend placeholder tokens to the caption.
            json_root: required path to the JSON-lines annotation file.
            resolution: target square size for images/masks (e.g. 512).
        """
        super().__init__()
        assert json_root is not None, "Please provide the path to the JSON file."
        self.name = name
        self.anno_list = []
        # Load the annotation file: one JSON object per line.
        with open(json_root, 'r') as file:
            for line in file:
                self.anno_list.append(json.loads(line))
        random.shuffle(self.anno_list)
        self.json_root = json_root
        self.bufsize = bufsize
        self.resolution = resolution
        self.epoch = -1
        self.deterministic = deterministic
        self.pipeline = pipeline
        self.task_prompt = task_prompt
        self.desc_prefix = desc_prefix
        self.aesthetic_score_threshold = aesthetic_score_threshold
        self.clip_score_threshold = clip_score_threshold
        self.transforms = transforms

    def _sample_data(self, data_info):
        """Build one training sample from a single annotation record.

        Returns None when the record should be skipped: image smaller than
        the target resolution, empty mask, unexpected tensor shapes (e.g.
        non-RGB image), or a constant mask after augmentation.
        """
        # Bug fix: the target size was hard-coded as 512 throughout this
        # method even though the class takes a `resolution` parameter; any
        # other resolution silently dropped every sample at the shape check.
        res = self.resolution
        output = {}
        # Load the image and reject ones smaller than the target size.
        image_path = data_info["imgpath"]
        image = Image.open(image_path)
        w, h = image.size
        if w < res or h < res:
            return None
        # Randomly pick one annotated mask and its matching caption.
        mask_idx = random.randint(0, len(data_info["maskpath"]) - 1)
        mask_path = data_info["maskpath"][mask_idx]
        prompt = data_info["text_en"][mask_idx]
        # Load the mask and align it with the image size.
        mask = Image.open(mask_path).convert("L")
        mask = mask.resize((w, h), Image.NEAREST)
        mask = np.array(mask).astype(np.float32)
        if len(mask.shape) == 3:  # defensive: "L" conversion should be 2-D
            mask = mask[:, :, 0]
        object_size = mask.sum() / 255.0
        if object_size == 0:
            return None
        # Grow the mask slightly, take a padded bounding box, and half the
        # time warp the box so the model does not overfit to axis-aligned
        # rectangles. Warp strength scales with sqrt(object area).
        mask = cv2.dilate(mask, np.ones((3, 3), np.uint8), iterations=1)
        aug_mask = get_min_bounding_box(mask, pp=2)
        if random.random() > 0.5:
            aug_mask = random_warponly(
                aug_mask,
                sigma=20 / 200 * (object_size ** (0.5)),
                patch=max(60 / 200 * (object_size ** (0.5)), 4),
            )
        alpha = torch.tensor((1.0, 0.0))
        output["pixel_values"], output["mask"] = augment_images(image, aug_mask, res)
        if output["pixel_values"].shape != (3, res, res) or output["mask"].shape != (1, res, res):
            return None
        # A constant mask (all 0 or all 1) carries no inpainting signal.
        if len(torch.unique(output["mask"])) == 1:
            return None
        output["tradeoff"] = alpha
        # Half of the samples become "context" inpainting: replace the
        # object mask with a random 64x64 square.
        task_type = "object_inpainting"
        if random.random() < 0.5:
            task_type = "context_inpainting"
        if task_type == "context_inpainting":
            output["mask"] = torch.zeros((1, res, res)).to(output["mask"].device)
            x_start = random.randint(0, res - 64)
            y_start = random.randint(0, res - 64)
            output["mask"][0, x_start:x_start + 64, y_start:y_start + 64] = 1
        # Build the prompt variants; optionally prefix the placeholder
        # tokens to the caption.
        promptA = self.task_prompt.object_inpainting.placeholder_tokens
        promptB = self.task_prompt.object_inpainting.placeholder_tokens
        if self.desc_prefix and prompt != "":
            promptA, promptB = f"{promptA} {prompt}", f"{promptB} {prompt}"
        output["input_idsA"], output["input_idsB"], output["input_ids"] = self.pipeline.tokenizer(
            [promptA, promptB, prompt],
            max_length=self.pipeline.tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        ).input_ids
        return output

    def sample_data(self):
        """Yield valid samples, skipping records that fail or raise."""
        for anno_info in self.anno_list:
            try:
                data = self._sample_data(anno_info)
                if data is None:
                    continue
                else:
                    yield data
            except Exception as e:
                # Best-effort: a bad record must not kill the epoch.
                print(f"Error in {anno_info}: {e}")
                continue

    def __iter__(self):
        for data in self.sample_data():
            yield data

    def __len__(self):
        # Upper bound: filtered/failed records still count here.
        return len(self.anno_list)

    def __repr__(self):
        return f"Petro_Dataset(name={self.name}, resolution={self.resolution})"