from pathlib import Path
from typing import List

import torch
import torchvision.transforms as T
from PIL import Image
from rainbowneko.data import HandlerChain, LoadImageHandler, BaseDataset, BaseBucket
from rainbowneko.infer import DataLoaderAction
from rainbowneko.infer.workflow import (Actions, BuildModelAction, PrepareAction, ForwardAction,
                                        LambdaAction, LoadModelAction)
from rainbowneko.models.wrapper import SingleWrapper
from rainbowneko.parser import neko_cfg
from rainbowneko.parser.model import NekoModelLoader

from data import YoloSource
from models.clip import CLIPClassifier

# OpenAI-CLIP image normalization statistics, shaped (C, 1, 1) so they
# broadcast over CHW tensors.
# NOTE(review): mean_t / std_t are not referenced anywhere in this file's
# visible code (the same values are inlined in `transform` below) —
# confirm they are used by an external caller before removing.
mean_t = torch.tensor([0.48145466,0.4578275,0.40821073]).view(-1,1,1)
std_t = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(-1,1,1)


# Input dataset root (expects images/ and labels/ subdirectories) and the
# output directory for the cleaned YOLO label files.
data_dir = Path("/data4/dzy/dataset/anime_textblock_detection_test/train")
out_dir = Path("/data4/dzy/dataset/anime_textblock_detection_test/train_clean") / 'labels'
out_dir.mkdir(parents=True, exist_ok=True)

# Per-patch preprocessing: resize each bbox crop to the CLIP input size
# (224x224) and normalize with the CLIP mean/std above.
transform=T.Compose([
    T.Resize((224,224)),
    T.ToTensor(),
    T.Normalize(mean=[0.48145466,0.4578275,0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
])

def crop_bbox(image:List[Image.Image], label:List, id:List[str], *states):
    """Crop every valid bbox out of each image and batch the patches.

    Args:
        image: batch of PIL images.
        label: per-image list of bboxes as (cls, x1, y1, x2, y2) with
            coordinates normalized to [0, 1] (YOLO-style xyxy; values may
            be strings or floats).
        id: per-image identifier, propagated to every patch cut from it.
        *states: extra workflow state, ignored.

    Returns:
        dict with key 'image' (stacked, transformed patch tensor of shape
        (P, 3, 224, 224)), 'label' (the source bbox of each patch) and
        'id' (the parent image id of each patch).
    """
    patches = []
    patch_ids = []
    bboxes = []

    for image_i, label_i, id_i in zip(image, label, id):
        w, h = image_i.size
        for bbox in label_i:
            cls, x1, y1, x2, y2 = bbox
            # Convert BEFORE validating: the coords may be strings, and a
            # lexicographic comparison ("10" < "9") would keep/drop the
            # wrong boxes. The original compared first, converted after.
            x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
            if x2 > x1 and y2 > y1:
                # Scale normalized coords to pixel space for PIL.crop.
                px1, py1 = int(x1 * w), int(y1 * h)
                px2, py2 = int(x2 * w), int(y2 * h)
                patch = image_i.crop((px1, py1, px2, py2))
                patches.append(transform(patch))
                bboxes.append(bbox)
                patch_ids.append(id_i)

    if not patches:
        # torch.stack([]) raises on a batch with no valid bbox; return an
        # empty batch with the transform's output shape instead.
        return {'image': torch.empty(0, 3, 224, 224), 'label': bboxes, 'id': patch_ids}

    return {'image': torch.stack(patches), 'label': bboxes, 'id': patch_ids}

def save_labels(pred, bboxes, id, thr=0.92, **states):
    """Group patch-level predictions by image and write YOLO label files.

    Args:
        pred: per-patch class scores; pred_i[1] is compared against thr.
            NOTE(review): pred_i[1] appears to be the text-class score —
            cls becomes 0 (text) only when it exceeds thr; confirm against
            the classifier head.
        bboxes: per-patch source bbox (cls, x1, y1, x2, y2).
        id: per-patch parent image id; one label file is written per
            unique id into the module-level ``out_dir``.
        thr: decision threshold on pred_i[1].
        **states: extra workflow state, ignored.
    """
    label_dict = {}
    for pred_i, bbox, id_i in zip(pred, bboxes, id):
        cls = int(pred_i[1] <= thr)  # text:0, notext:1
        label_dict.setdefault(id_i, []).append([cls, *bbox[1:]])

    for id_i, label in label_dict.items():
        # Each entry is a list, not a str: format it as one space-separated
        # YOLO line ("cls x1 y1 x2 y2") before joining — joining the raw
        # lists (as the original did) always raises TypeError.
        lines = (' '.join(str(v) for v in entry) for entry in label)
        (out_dir / f'{id_i}.txt').write_text('\n'.join(lines))

@neko_cfg
def infer_all(path):
    """Config factory: batch-infer a text/no-text class for every bbox
    crop under ``path`` and write cleaned label files.

    Pipeline per batch: load images + YOLO labels -> crop_bbox (one patch
    per valid bbox) -> model forward -> save_labels (threshold the
    predictions and write one label file per source image).

    Args:
        path: dataset root; images are read from ``path/'images'`` and
            labels from ``path/'labels'``.
    """
    return DataLoaderAction(
        dataset=BaseDataset(_partial_=True, batch_size=32, loss_weight=1.0,
            source=dict(
                data_source1=YoloSource(
                    img_root=path / 'images',
                    label_root=path / 'labels',
                ),
            ),
            handler=HandlerChain(
                load=LoadImageHandler(),
            ),
            bucket=BaseBucket(),
        ),
        actions=Actions([
            # crop_bbox rewrites 'image' to a stacked patch tensor and
            # expands 'label'/'id' to per-patch lists.
            LambdaAction(f_act=crop_bbox),
            ForwardAction(key_map_in=('image -> input.image', 'model -> model', 'device -> device', 'dtype -> dtype')),
            # 'label' (per-patch source bboxes) feeds save_labels' bboxes arg.
            LambdaAction(f_act=save_labels, key_map_in=('output.pred -> pred', 'id -> id', 'label -> bboxes')),
            # VisClassAction(
            #     class_map=['not text', 'text'],
            #     key_map_in=('output.pred -> pred', 'id -> name')
            # )
        ])
    )

@neko_cfg
def make_cfg():
    """Top-level workflow config: fp16 CLIP classifier on CUDA, fine-tuned
    checkpoint loaded on top of the pretrained backbone, then inference
    over the whole ``data_dir`` dataset via infer_all."""
    return dict(workflow=Actions(actions=[
        PrepareAction(device='cuda', dtype=torch.float16),
        BuildModelAction(SingleWrapper(_partial_=True, model=CLIPClassifier.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K"))),
        LoadModelAction(dict(
            model=NekoModelLoader(
                # Only the 'model' submodule is restored from the checkpoint.
                module_to_load='model',
                path='exps/text_disc/ckpts/clip-text_det-1000.ckpt',
            ),
        )),
        infer_all(path=data_dir)
    ]))