import torch
import numpy as np
import random

import cv2
from torch.utils.data import Dataset
from transformers import BertTokenizer
from utils import get_image_filename


# Data-loading code used at training time.
class POIDataset(Dataset):
    """Dataset over preprocessed POI (point-of-interest) signboard samples."""

    def __init__(self, data: list[dict], train=True, load_image=False, dsize=(224, 224), draw=False):
        self.data = data              # list of preprocessed sample dicts
        self.train = train            # whether rank-order labels are available
        self.load_image = load_image  # whether to load and crop image patches
        self.dsize = dsize            # target size for cropped OCR patches
        self.draw = draw              # draw contours onto the image before cropping

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Convert one preprocessed sample into model-ready data.

        The OCR entries are shuffled so the sequence is never fed to the
        model in a single fixed order.

        Returns:
            texts (list[str]): shuffled OCR strings.
            positions (torch.Tensor [ocr_num, 5]): normalized bbox (4 values)
                plus area ratio (1 value), permuted consistently with ``texts``.
            seq_id (str): sample id, used when assembling a batch.
            rankorder (list[int] | None): -1 means the box is not part of the
                POI name, other values are position indices; None when
                ``train`` is False.
            images: result of ``get_images`` (OCR patches and the board image)
                when ``load_image`` is True, else None.
        """
        sample = self.data[index]

        # Random permutation shared by texts, positions and labels.
        order = list(range(len(sample["texts"])))
        random.shuffle(order)

        texts = [sample["texts"][i] for i in order]

        feats = torch.zeros((len(texts), 5))
        feats[:, :4] = torch.Tensor(sample["normlize_bboxs"])
        feats[:, -1] = torch.Tensor(sample["areas"])
        feats = feats[order]

        rankorder = [sample["rankorder"][i] for i in order] if self.train else None
        images = get_images(sample, self.dsize, self.draw) if self.load_image else None

        return texts, feats, sample["point_seq_id"], rankorder, images
        

class MyCollator:
    """Collate function that flattens POIDataset samples into one training batch."""

    def __init__(self, tokenizer: BertTokenizer) -> None:
        self.tokenizer = tokenizer

    def __call__(self, batch):
        """Concatenate samples; the batch length is the total number of OCR boxes.

        Returns:
            inputs (dict): tokenized text sequences with input_ids and
                attention_mask tensors.
            positions (torch.Tensor): per-box position and area-ratio features.
            id_map (list[tuple]): (seq_id, start, end) slice of each sample
                inside the flattened batch.
            rankorder (torch.LongTensor | None): position of each OCR box in the
                POI name; None when the samples carry no labels (train=False).
            images / full_images (torch.Tensor | None): per-box patches and board
                images, or None when images were not loaded.
        """
        # Running offsets marking where each sample's boxes live in the batch.
        offsets = [0]
        for sample in batch:
            offsets.append(offsets[-1] + len(sample[0]))
        id_map = [(sample[2], offsets[i], offsets[i + 1]) for i, sample in enumerate(batch)]

        all_texts = []
        for sample in batch:
            all_texts.extend(sample[0])
        inputs = self.tokenizer(all_texts, padding=True, truncation=True,
                                return_tensors="pt", verbose=False, return_token_type_ids=False)

        has_images = batch[0][4] is not None
        images = torch.cat([sample[4][0] for sample in batch], dim=0) if has_images else None
        full_images = torch.cat([sample[4][1] for sample in batch], dim=0) if has_images else None

        rankorder = None
        if batch[0][3] is not None:
            labels = []
            for sample in batch:
                labels.extend(sample[3])
            rankorder = torch.LongTensor(labels)

        positions = torch.cat([sample[1] for sample in batch])
        return inputs, positions, id_map, rankorder, images, full_images


class InferCollator:
    """Collate function for inference: like MyCollator, but returns the raw
    per-sample texts (keyed by seq_id) instead of rank-order labels."""

    def __init__(self, tokenizer: BertTokenizer) -> None:
        self.tokenizer = tokenizer

    def __call__(self, batch):
        """Flatten a list of samples into one inference batch.

        Returns:
            inputs (dict): tokenized text sequences with input_ids and
                attention_mask tensors.
            positions (torch.Tensor): per-box position and area-ratio features.
            id_map (list[tuple]): (seq_id, start, end) slice of each sample
                inside the flattened batch.
            origin_texts (dict): seq_id -> that sample's (shuffled) text list.
            images / full_images (torch.Tensor | None): per-box patches and board
                images, or None when images were not loaded.
        """
        lengths = [len(x[0]) for x in batch]
        cum_lengths = [0] + np.cumsum(lengths).tolist()
        id_map = [(x[2], cum_lengths[i], cum_lengths[i + 1]) for i, x in enumerate(batch)]
        texts = sum([x[0] for x in batch], [])
        # Tokenize exactly once (the previous version redundantly rebuilt
        # `texts` and re-ran the tokenizer a second time).
        inputs = self.tokenizer(texts, padding=True, truncation=True,
                                return_tensors="pt", verbose=False, return_token_type_ids=False)
        images = torch.cat([x[4][0] for x in batch], dim=0) if batch[0][4] is not None else None
        full_images = torch.cat([x[4][1] for x in batch], dim=0) if batch[0][4] is not None else None
        origin_texts = {x[2]: x[0] for x in batch}
        positions = torch.cat([x[1] for x in batch])
        return inputs, positions, id_map, origin_texts, images, full_images
        

def get_images(data: dict, dsize: tuple[int, int] = (224, 224), draw=False):
    """Crop each OCR box (plus the whole board) from the sample image, in order, and resize.

    Args:
        data: preprocessed sample dict; reads "image_id", "origin_bboxs",
            "board_bbox", and (when ``draw`` is True) "contour" / "board_contour".
        dsize: target (width, height) for each cropped patch, passed to cv2.resize.
        draw: when True, draw the OCR polygons and the board outline onto the
            image before cropping, injecting polygon-shape information.

    Returns:
        tuple:
            - torch.Tensor [ocr_num, 3, H, W]: one resized RGB patch per OCR
              box, values scaled to [0, 1].
            - torch.Tensor [1, 3, H, W]: the resized board patch.

    NOTE(review): an earlier docstring claimed ``2*ocr_num+2`` images with
    drawn variants appended; the code below builds ``ocr_num+1`` crops and
    splits them into (patches, board) — documented accordingly.
    """
    # cv2.imread yields BGR; convert to RGB for the model.
    img = cv2.cvtColor(cv2.imread(get_image_filename(data["image_id"])), cv2.COLOR_BGR2RGB)
    if draw:
        # Draw every OCR polygon plus the board outline (red, 2 px).
        for contour in data["contour"] + [data["board_contour"]]:
            img = cv2.polylines(img, [np.array(contour).reshape(-1, 1, 2)], True, (255, 0, 0), 2)
    # Crop each OCR bbox and, last, the whole board.
    # Assumes bboxes are pixel coords [x1, y1, x2, y2] — TODO confirm against preprocessing.
    slices = [img[bbox[1]: bbox[3], bbox[0]: bbox[2], :] for bbox in data["origin_bboxs"] + [data["board_bbox"]]]
    slices = [cv2.resize(x, dsize) for x in slices]

    # HWC uint8 -> NCHW float in [0, 1].
    images = torch.from_numpy(np.array(slices)).permute(0, 3, 1, 2) / 255
    # Split off the single board patch, kept batched with size 1.
    return images[:-1, :], images[-1].unsqueeze(0)