from functools import partial

import torch
import torchvision
import torchvision.transforms as T
from torch import nn
from torch.nn import CrossEntropyLoss
from torchmetrics.classification import MulticlassAccuracy, MulticlassF1Score

from cfgs.py.train import train_base, tuning_base
from rainbowneko.ckpt_manager import ckpt_manager
from rainbowneko.evaluate import MetricGroup, MetricContainer, Evaluator
from rainbowneko.models.wrapper import SingleWrapper
from rainbowneko.parser import CfgWDModelParser
from rainbowneko.parser.model import NekoModelLoader, NekoResumer
from rainbowneko.data import BaseDataset
from rainbowneko.data import BaseBucket
from rainbowneko.data.handler import HandlerChain, ImageHandler, LoadImageHandler
from rainbowneko.data.source import ImageFolderClassSource
from rainbowneko.train.loss import LossContainer
from rainbowneko.utils import neko_cfg, CosineLR
from models import CLIPClassifier
from timm.data import create_transform

# Number of target classes for the discriminator (binary task);
# consumed by MulticlassAccuracy / MulticlassF1Score in make_cfg below.
num_classes=2

@neko_cfg
def make_cfg():
    """Top-level training config for the CLIP-based text discriminator.

    NOTE(review): the original definition was missing the ``@neko_cfg``
    decorator that ``cfg_data`` below uses, so the ``dict(...)`` literal was
    evaluated and discarded (the function returned ``None``). The decorator
    lets the RainbowNeko parser pick the dict up as the config body.
    """
    dict(
        _base_=[train_base, tuning_base],
        exp_dir='exps/text_disc',

        # Two parameter groups: the classification head / projection train at a
        # normal rate, while late transformer blocks (indices 26-32, matched by
        # regex) are fine-tuned with a tiny learning rate.
        model_part=CfgWDModelParser([
            dict(
                lr=2e-5,
                layers=['model.head', 'model.visual_projection'],
            ),
            dict(
                lr=1e-7,
                # Raw strings: "\." must reach the regex engine literally and
                # would otherwise be an invalid escape (SyntaxWarning on 3.12+).
                layers=[r're:.*(2[6-9]|3[0-2])\.self_attn$', r're:.*(2[6-9]|3[0-2])\.mlp$'],
            )
        ]),

        # func(_partial_=True, ...) same as partial(func, ...)
        ckpt_manager=[
            ckpt_manager(saved_model=({'model':'model', 'trainable':False},))
        ],

        train=dict(
            train_steps=1000,
            workers=2,
            max_grad_norm=None,  # gradient-norm clipping disabled
            save_step=200,

            loss=LossContainer(loss=CrossEntropyLoss()),

            optimizer=partial(torch.optim.AdamW, weight_decay=5e-4),

            # Warm-start the wrapped model from an existing checkpoint;
            # training still begins at step 0.
            resume=NekoResumer(
                loader=dict(
                    model=NekoModelLoader(
                        module_to_load='model',
                        path='ckpts/clip_text_det.ckpt',
                    ),
                ),
                start_step=0
            ),

            scale_lr=False,
            scheduler=CosineLR(
                _partial_=True,
                warmup_steps=100,
            ),
            metrics=MetricGroup(
                acc=MetricContainer(MulticlassAccuracy(num_classes=num_classes)),
                f1=MetricContainer(MulticlassF1Score(num_classes=num_classes)),
            ),
        ),

        model=dict(
            name='clip-text_det',
            wrapper=partial(SingleWrapper, model=CLIPClassifier.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K"))
        ),

        data_train=cfg_data(), # config can be split into another function with @neko_cfg

        evaluator=None,  # no evaluation stage configured
    )

@neko_cfg
def cfg_data():
    """Training-data config: one ImageFolder class source with flip/rotation/
    color-jitter augmentation and CLIP-pretraining normalization stats."""
    dict(
        dataset1=partial(BaseDataset, batch_size=64, loss_weight=1.0,
            source=dict(
                data_source1=ImageFolderClassSource(
                    img_root='/data3/dzy/dataset/text_disc'
                ),
            ),
            handler=HandlerChain(
                load=LoadImageHandler(),
                image=ImageHandler(transform=T.Compose([
                    T.Resize((224,224)),
                    T.RandomHorizontalFlip(),  # random horizontal flip
                    T.RandomRotation(degrees=15),  # random rotation within ±15°
                    T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),  # color jitter
                    T.ToTensor(),  # PIL image -> float tensor in [0, 1]
                    # CLIP pretraining mean/std (matches the laion CLIP backbone)
                    T.Normalize(mean=[0.48145466,0.4578275,0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
                ]))
            ),
            bucket=BaseBucket(),
        )
    )