# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: read_txt
# > Author: 04000387
# > Created Time: 2024/12/26 10:30
# *******************************************************************
import torch
from torch.utils.data import Dataset
from pathlib import Path
import json
import cv2
import numpy as np


class ReadTxt(Dataset):
    """Dataset of (image, caption, label) triples read from JSONL files.

    Every ``.jsonl`` file under ``<base_dir>/txt/positive`` contributes one
    sample per line; each line is a JSON object with at least an
    ``img_file`` path and a ``content`` caption string.

    Returns per item: (RGB image scaled to [0, 1], caption str,
    binary label — 0 for background-only images, 1 otherwise).
    """

    # Caption the generator emits for background-only images.
    # Runtime string — must match the generator's output byte-for-byte.
    _BACKGROUND_CAPTION = "这是一张背景图,无文字"

    # Known-bad sample indices in the generated data; presumably corrupt
    # entries found during training — TODO confirm against the generator.
    _BAD_INDICES = frozenset({21321, 24776})

    def __init__(self, base_dir="../generate_dir/generate"):
        super().__init__()
        self.path = Path(base_dir)
        self.items = self._cat_items()

    def _cat_items(self):
        """Concatenate the raw lines of every ``*.jsonl`` under txt/positive."""
        path = self.path / "txt" / "positive"
        items = []
        for jsonl_path in path.iterdir():
            if jsonl_path.is_file() and jsonl_path.suffix == ".jsonl":
                # Context manager guarantees the handle closes even on error
                # (original leaked the handle if readlines() raised).
                with jsonl_path.open("r", encoding="utf-8") as fp:
                    items.extend(fp.readlines())
        return items

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        # Step over known-bad entries (the following sample is returned twice
        # over an epoch — preserves the original workaround's behavior).
        if idx in self._BAD_INDICES:
            idx += 1
        item = json.loads(self.items[idx])
        img_path = item["img_file"]
        img = cv2.imread(img_path)
        if img is None:
            # cv2.imread returns None on failure; fail loudly here instead of
            # crashing with an opaque error inside cvtColor.
            raise FileNotFoundError(f"cannot read image: {img_path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        content = item["content"]

        # Binary text/no-text label derived from the caption sentinel.
        cls = 0 if content == self._BACKGROUND_CAPTION else 1

        return img / 255.0, content, cls


class ReadWithBoxData(Dataset):
    """Dataset of images with per-word text and bounding-box annotations.

    Every ``.jsonl`` file directly under ``base_path`` contributes one JSON
    object per line, each with an ``img_path`` and a ``detail`` list of
    ``{"word": str, "box": [x1, y1, x2, y2]}`` entries.

    Returns per item: (RGB image scaled to [0, 1], joined text str,
    (N, 4) float32 boxes divided by 224 — presumably the model's input
    resolution; TODO confirm against the training pipeline).
    """

    def __init__(self, base_path):
        super().__init__()
        self.base_path = Path(base_path)
        self.items = self._cat_items()

    def _cat_items(self):
        """Concatenate the raw lines of every ``*.jsonl`` in base_path."""
        items = []
        for jsonl_path in self.base_path.iterdir():
            if jsonl_path.is_file() and jsonl_path.suffix == ".jsonl":
                # Context manager guarantees the handle closes even on error
                # (original leaked the handle if readlines() raised).
                with jsonl_path.open("r", encoding="utf-8") as fp:
                    items.extend(fp.readlines())
        return items

    def __len__(self):
        return len(self.items)

    def _get_text_box(self, items):
        """Split annotation dicts into a joined string and an (N, 4) array."""
        words = [it["word"] for it in items]
        boxes = [it["box"] for it in items]
        return "".join(words), np.array(boxes, dtype=np.float32)

    def __getitem__(self, idx):
        item = json.loads(self.items[idx])
        img_path = item["img_path"]
        img = cv2.imread(img_path)
        if img is None:
            # cv2.imread returns None on failure; fail loudly here instead of
            # crashing with an opaque error inside cvtColor.
            raise FileNotFoundError(f"cannot read image: {img_path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        text, boxes = self._get_text_box(item["detail"])
        return img / 255.0, text, boxes / 224.0


class WithBoxCollectFn:
    """Collate function batching (image, text, boxes) samples.

    Pads each sample's box array with zero rows up to the tokenized
    sequence length so box rows align positionally with ``input_ids``.
    """

    def __init__(self, tokenizer, transformers):
        # tokenizer: callable accepting (list[str], return_tensors="pt",
        # padding=True) and returning a mapping with an "input_ids" tensor —
        # presumably a HuggingFace tokenizer; TODO confirm.
        self.tokenizer = tokenizer
        # transformers: per-image transform returning a tensor of a fixed shape.
        self.transformers = transformers

    def __call__(self, items):
        # Stack per-sample transformed images into one (B, ...) batch tensor.
        img = torch.stack([self.transformers(item[0]) for item in items], dim=0)
        txt = [item[1] for item in items]
        source_txt = self.tokenizer(txt, return_tensors="pt", padding=True)
        seq_len = source_txt["input_ids"].size(1)

        padded = []
        for item in items:
            box = item[2]
            if len(box) < seq_len:
                # Zero-filled float32 pad (original built float64 via nested
                # list multiplication and re-cast it afterwards).
                pad = np.zeros((seq_len - len(box), 4), dtype=np.float32)
                box = np.vstack((box, pad))
            padded.append(box)
        boxes = np.asarray(padded, dtype=np.float32)

        return img, source_txt, torch.from_numpy(boxes)


class CollectFn:
    """Collate function batching (image, caption, label) samples.

    Images go through the configured transform and are stacked into one
    batch tensor; captions are tokenized as a batch; labels become a
    float32 tensor.
    """

    def __init__(self, tokenizer, transformers):
        self.tokenizer = tokenizer
        self.transformers = transformers

    def __call__(self, items):
        images, captions, labels = zip(*items)
        batch_imgs = torch.stack([self.transformers(im) for im in images], dim=0)
        tokenized = self.tokenizer(list(captions), return_tensors="pt", padding=True)
        cls = np.array(labels, dtype=np.float32)
        return batch_imgs, tokenized, torch.tensor(cls)
