import sys
import os

# Make the current working directory importable so that project-local modules
# (e.g. `models.clip` imported below) resolve when this config is run as a
# script from the repository root.
current_directory = os.getcwd()
sys.path.append(current_directory)

import torch
import torchvision
import torchvision.transforms as T
from torch import nn

from rainbowneko.infer import HandlerAction, DataLoaderAction
from rainbowneko.infer.workflow import (Actions, BuildModelAction, PrepareAction, FeedAction, ForwardAction,
                                        LambdaAction, VisClassAction, LoadModelAction)
from rainbowneko.models.wrapper import SingleWrapper
from rainbowneko.parser.model import NekoModelLoader
from rainbowneko.data import UnLabelSource, HandlerChain, LoadImageHandler, ImageHandler, BaseDataset, BaseBucket
from rainbowneko.utils import neko_cfg
from timm.data import create_transform
from models.clip import  CLIPClassifier
from torchvision.utils import save_image

# Number of output classes for the text/not-text discriminator.
num_classes = 2

# CLIP preprocessing statistics (same values as the T.Normalize calls below),
# shaped (C, 1, 1) so they broadcast over (C, H, W) image tensors when
# un-normalizing an image for visual inspection.
_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

mean_t = torch.tensor(_CLIP_MEAN).view(-1, 1, 1)
std_t = torch.tensor(_CLIP_STD).view(-1, 1, 1)

@neko_cfg
def infer_one(path):
    # Workflow fragment that classifies a single image file at `path`.
    # NOTE(review): under @neko_cfg the bare Actions(...) expression appears to
    # be captured as the config value (no `return` needed) — confirm against
    # rainbowneko's py-config parser before restructuring this body.
    Actions([
        # Seed the workflow state: state['image'] = path (a file path string).
        FeedAction(image=path),
        # Load the image from disk, then resize/normalize with CLIP statistics
        # (same mean/std as mean_t/std_t defined above).
        HandlerAction(handler=HandlerChain(
            load=LoadImageHandler(),
            image=ImageHandler(transform=T.Compose([
                T.Resize((224,224)),
                T.ToTensor(),
                T.Normalize(mean=[0.48145466,0.4578275,0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
            ]))
        ), key_map_in=('image -> image',)),
        # Debug side effect: un-normalize and dump the preprocessed image so
        # the preprocessing can be inspected visually.
        LambdaAction(f_act=lambda image, **kwargs: save_image(image*std_t+mean_t, 'test.png')),
        # Add a batch dimension: (C, H, W) -> (1, C, H, W) for the model.
        LambdaAction(f_act=lambda image, **kwargs: {'image': image.unsqueeze(0)}),
        # Run the model forward pass; the image is routed to input.image.
        ForwardAction(key_map_in=('image -> input.image', 'model -> model', 'device -> device', 'dtype -> dtype')),
        # Map prediction logits to human-readable labels (index 0/1).
        VisClassAction(
            class_map=['not text', 'text'],
            key_map_in=('output.pred -> pred',)
        )
    ])

@neko_cfg
def infer_all(path):
    # Workflow fragment that classifies every image under the directory `path`
    # by iterating a DataLoader built from an unlabeled image source.
    # NOTE(review): under @neko_cfg the bare DataLoaderAction(...) expression
    # appears to be captured as the config value — confirm against
    # rainbowneko's py-config parser before restructuring this body.
    DataLoaderAction(
        # _partial_=True defers dataset construction to the framework;
        # batch_size=1 processes one image at a time.
        dataset=BaseDataset(_partial_=True, batch_size=1, loss_weight=1.0,
            source=dict(
                # Unlabeled source: images only, no ground-truth labels.
                data_source1=UnLabelSource(
                    img_root=path
                ),
            ),
            # Same preprocessing as infer_one: load, resize to 224x224,
            # normalize with CLIP statistics.
            handler=HandlerChain(
                load=LoadImageHandler(),
                image=ImageHandler(transform=T.Compose([
                    T.Resize((224,224)),
                    T.ToTensor(),
                    T.Normalize(mean=[0.48145466,0.4578275,0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
                ]))
            ),
            bucket=BaseBucket(),
        ),
        # Per-batch actions: forward pass, then print predicted labels with
        # each sample's id mapped in as its display name.
        actions=Actions([
            ForwardAction(key_map_in=('image -> input.image', 'model -> model', 'device -> device', 'dtype -> dtype')),
            VisClassAction(
                class_map=['not text', 'text'],
                key_map_in=('output.pred -> pred', 'id -> name')
            )
        ])
    )

def make_cfg():
    """Build the top-level inference workflow config.

    Prepares a CUDA/fp32 runtime, builds a CLIP-based classifier, loads the
    fine-tuned checkpoint, and runs batch inference over ``imgs/``.

    Returns:
        dict: ``{'workflow': Actions(...)}`` consumed by the config loader.
    """
    # BUG FIX: the original built this dict and discarded it, so make_cfg()
    # returned None and the loader received no config. The `return` is the fix.
    # NOTE(review): sibling configs may decorate make_cfg with @neko_cfg —
    # confirm whether this entry point needs the decorator as well.
    return dict(workflow=Actions(actions=[
        # Run on GPU in full fp32 precision.
        PrepareAction(device='cuda', dtype=torch.float32),
        # Wrap the pretrained CLIP backbone in the single-model wrapper;
        # _partial_=True defers construction to the framework.
        BuildModelAction(SingleWrapper(_partial_=True, model=CLIPClassifier.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K"))),
        # Load the fine-tuned text-discriminator weights into the wrapper's
        # `model` submodule.
        LoadModelAction(dict(
            model=NekoModelLoader(
                module_to_load='model',
                path='exps/text_disc/ckpts/clip-text_det-1000.ckpt',
            ),
        )),
        # infer_one(path="imgs/p3.png")  # single-image variant, kept for reference
        infer_all(path="imgs/")
    ]))