import ast
import json
import os
from dataclasses import dataclass
from pathlib import Path

import torch
from tqdm import tqdm

from cn_clip.clip.model import convert_weights, CLIP
from cn_clip.training.main import convert_models_to_fp32
from cn_clip.eval.data import get_eval_img_dataset, get_eval_txt_dataset


@dataclass
class FeatureExtractorConfig:
    """Settings shared by image and text feature extraction (see _load_model)."""

    resume: str  # path to the checkpoint file loaded via torch.load (needs a "state_dict" entry)
    vision_model: str  # "ViT-B-16"  -- name of a JSON file under ./model_configs
    text_model: str  # "RoBERTa-wwm-ext-base-chinese"  -- name of a JSON file under ./model_configs
    precision: str = "amp"  # "amp"/"fp32" keep the model in fp32; "fp16" converts weights to half
    gpu: int = 0  # CUDA device index the model and batches are moved to
    debug: bool = False  # presumably enables extra diagnostics downstream -- TODO confirm (unused in this file)


@dataclass
class ImageConfig(FeatureExtractorConfig):
    """Settings for extract_image_features."""

    image_data: str | None = None  # input image dataset path, consumed by get_eval_img_dataset
    output: str | None = None  # destination .img_feat.jsonl; derived from the input path when None
    img_batch_size: int = 64  # presumably the eval image dataloader batch size -- confirm in get_eval_img_dataset


@dataclass
class TextConfig(FeatureExtractorConfig):
    """Settings for extract_text_features."""

    text_data: str | None = None  # input texts (.jsonl) path, consumed by get_eval_txt_dataset
    output: str | None = None  # destination .txt_feat.jsonl; derived from text_data when None
    context_length: int = 52  # max token length, passed to get_eval_txt_dataset as max_txt_length
    text_batch_size: int = 64  # presumably the eval text dataloader batch size -- confirm in get_eval_txt_dataset


def _load_model(config: FeatureExtractorConfig):
    """Build a CLIP model from bundled JSON configs and load a checkpoint.

    The vision/text architecture JSONs are looked up in ./model_configs next
    to this file, merged into a single kwargs dict, and used to construct the
    model. Weights are then restored from ``config.resume`` (a torch
    checkpoint with a "state_dict" entry) and the model is moved to GPU
    ``config.gpu``.

    Returns:
        The CLIP model with weights loaded -- fp32 when ``config.precision``
        is "amp"/"fp32", fp16 when it is "fp16". Not yet in eval mode.
    """
    config_dir = Path(__file__).parent / "model_configs"
    vision_model_config_file = (
        config_dir / f"{config.vision_model.replace('/', '-')}.json"
    )
    text_model_config_file = (
        config_dir / f"{config.text_model.replace('/', '-')}.json"
    )
    assert vision_model_config_file.exists(), f"{vision_model_config_file} not exists!"
    assert text_model_config_file.exists(), f"{text_model_config_file} not exists!"
    with open(vision_model_config_file, "r") as fv, open(
        text_model_config_file, "r"
    ) as ft:
        model_info = json.load(fv)
        # ResNet-style configs encode vision_layers as a string such as
        # "[3, 4, 6, 3]". Parse it with ast.literal_eval instead of eval()
        # so a config file cannot execute arbitrary code.
        if isinstance(model_info["vision_layers"], str):
            model_info["vision_layers"] = ast.literal_eval(model_info["vision_layers"])
        # Text-tower settings extend/override the vision settings.
        model_info.update(json.load(ft))
    model = CLIP(**model_info)
    convert_weights(model)  # cast to fp16 first, mirroring upstream cn_clip
    if config.precision in ["amp", "fp32"]:
        convert_models_to_fp32(model)
    model.cuda(config.gpu)
    if config.precision == "fp16":
        convert_weights(model)
    checkpoint = torch.load(config.resume, map_location="cpu")
    sd = checkpoint["state_dict"]
    # Checkpoints saved from DistributedDataParallel prefix keys with
    # "module."; strip it (and drop unused bert.pooler weights) before loading.
    if next(iter(sd.items()))[0].startswith("module"):
        sd = {k[len("module.") :]: v for k, v in sd.items() if "bert.pooler" not in k}
    model.load_state_dict(sd)
    return model


def extract_text_features(config: TextConfig):
    """Encode every text in ``config.text_data`` with the CLIP text tower.

    Each L2-normalized feature vector is written to ``config.output`` as one
    JSON line of the form {"text_id": ..., "feature": [...]}.

    Note: the parameter was previously annotated as FeatureExtractorConfig,
    which lacks the output/text_data/context_length fields used here; the
    annotation is corrected to TextConfig (no runtime change).
    """
    if config.output is None:
        # Default next to the input: "<stem>.txt_feat.jsonl". removesuffix is
        # safer than the previous text_data[:-6] slice, which blindly chopped
        # six characters even when the file had no ".jsonl" extension.
        config.output = f"{config.text_data.removesuffix('.jsonl')}.txt_feat.jsonl"
    model = _load_model(config)
    text_data_obj = get_eval_txt_dataset(config, max_txt_length=config.context_length)
    model.eval()
    write_cnt = 0
    with open(config.output, "w") as fout:
        dataloader = text_data_obj.dataloader
        with torch.no_grad():
            for batch in tqdm(dataloader):
                text_ids, texts = batch
                texts = texts.cuda(config.gpu, non_blocking=True)
                # model(None, texts) runs only the text tower.
                text_features = model(None, texts)
                text_features /= text_features.norm(dim=-1, keepdim=True)
                for text_id, text_feature in zip(
                    text_ids.tolist(), text_features.tolist()
                ):
                    fout.write(
                        json.dumps({"text_id": text_id, "feature": text_feature}) + "\n"
                    )
                    write_cnt += 1
    print(f"{write_cnt} text features are stored in {config.output}")


def extract_image_features(config: ImageConfig):
    """Encode every image in ``config.image_data`` with the CLIP vision tower.

    Each L2-normalized feature vector is written to ``config.output`` as one
    JSON line of the form {"image_id": ..., "feature": [...]}.
    """
    if config.output is None:
        # BUG FIX: this branch previously read config.text_data, a field that
        # ImageConfig does not define, so deriving the default output path
        # always raised AttributeError. Derive it from image_data instead;
        # the replace follows the upstream cn_clip naming convention of
        # "<split>_imgs.tsv" inputs -> "<split>_imgs.img_feat.jsonl" outputs
        # (confirm against the image_data naming actually used by callers).
        config.output = "{}.img_feat.jsonl".format(
            config.image_data.replace("_imgs.tsv", "_imgs")
        )
    model = _load_model(config)
    img_data_obj = get_eval_img_dataset(config)
    model.eval()
    write_cnt = 0
    with open(config.output, "w") as fout:
        dataloader = img_data_obj.dataloader
        with torch.no_grad():
            for batch in tqdm(dataloader):
                image_ids, images = batch
                images = images.cuda(config.gpu, non_blocking=True)
                # model(images, None) runs only the vision tower.
                image_features = model(images, None)
                image_features /= image_features.norm(dim=-1, keepdim=True)
                for image_id, image_feature in zip(
                    image_ids.tolist(), image_features.tolist()
                ):
                    fout.write(
                        json.dumps({"image_id": image_id, "feature": image_feature})
                        + "\n"
                    )
                    write_cnt += 1
    print(f"{write_cnt} image features are stored in {config.output}")
