from functools import cache
import os
from pathlib import Path
import json

import torch
from PIL import Image
import math
import tqdm
from torch.utils.data import Dataset
from torchvision.transforms import v2 as TV

# Per-channel RGB mean/std normalization constants used in official CLIP pretraining
CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
CLIP_STD = [0.26862954, 0.26130258, 0.27577711]


def load_dataset_config():
    """Read the dataset root directory from ``config.json`` next to this file.

    Returns the ``data_root`` value from the config, or ``"./dataset/"``
    when the file is missing or the key is absent.
    """
    config_path = Path(__file__).parent / "config.json"
    if not config_path.exists():
        # No config shipped alongside the module: warn and use the default.
        print("Warning: config.json not found, using default path './dataset/'")
        return "./dataset/"
    with open(config_path, "r", encoding="utf-8") as f:
        return json.load(f).get("data_root", "./dataset/")


@cache
def get_index_data():
    """Build the (caption, image) sample index for the whole dataset.

    Reads ``data.json.txt`` under the configured data root and returns a dict:
      - "index": list of ``(item_id, image_path, caption, img_id)`` tuples,
        one per (caption, image) pair, in deterministic item-id order;
      - "dynasties" / "categories" / "types": distinct metadata values seen;
      - "images": set of relative image paths.

    Cached with @cache so the JSON is parsed at most once per process.
    """
    data_root = load_dataset_config()
    data_json_path: str = os.path.join(data_root, "data.json.txt")
    images_root = Path(data_root)
    with open(data_json_path, "r") as f:
        items = json.load(f)["items"]
        # Sort for a stable index order across runs (dataset splits slice it).
        ids = sorted(items.keys())
    dynasties = set()
    categories = set()
    types = set()
    final_data = []
    next_id = 0
    path2id: dict[str, int] = {}
    images = set()
    for item_id in tqdm.tqdm(ids, desc="Indexing dataset"):
        item = items[item_id]
        dynasties.add(item["meta"]["年代"])
        categories.add(item["meta"]["分类"])
        types.update(item["types"])
        # Warn once per item (the original warned once per caption) and skip:
        # an item without images contributes no samples.
        if "img_paths" not in item:
            print(f"No images found for {item['name']}")
            continue
        for caption in build_captions(item):
            for img_path in item["img_paths"]:
                images.add(img_path)
                # Assign each distinct image path a stable integer id on
                # first sight; reuse it for every later (caption, image) pair.
                if img_path not in path2id:
                    path2id[img_path] = next_id
                    next_id += 1
                dest = images_root / img_path
                final_data.append((item_id, dest, caption, path2id[img_path]))

    return {
        "index": final_data,
        "dynasties": dynasties,
        "categories": categories,
        "types": types,
        "images": images,
    }


def heat():
    """Warm the get_image cache by decoding every image in the dataset once."""
    image_paths = get_index_data()["images"]
    root = Path(load_dataset_config())
    for rel_path in tqdm.tqdm(image_paths, desc="Heating images"):
        get_image(str(root / rel_path))


def build_captions(item) -> list[str]:
    """Build the caption strings for one dataset item.

    Always emits "name,dynasty,category"; additionally emits one
    comma-joined caption for ``patterns`` and one for ``types``
    when those lists are non-empty.
    """
    meta = item["meta"]
    captions = [f"{item.get('name')},{meta['年代']},{meta['分类']}"]
    for key in ("patterns", "types"):
        values = item[key]
        if values:
            captions.append(",".join(values))
    return captions


@cache
def get_image(path: str):
    """Load the image at *path* as RGB, memoizing the decoded result per path.

    NOTE(review): the cache is unbounded — acceptable for a fixed dataset,
    but it keeps every decoded image in memory for the life of the process.
    """
    return Image.open(path).convert("RGB")


def decorator_timer(some_function):
    """Decorator that prints the wall-clock duration of each call.

    Fix: apply functools.wraps so the wrapped function keeps its
    ``__name__``/``__doc__`` (the original wrapper discarded them).
    """
    from functools import wraps
    from time import time

    @wraps(some_function)
    def wrapper(*args, **kwargs):
        t1 = time()
        result = some_function(*args, **kwargs)
        end = time() - t1
        print(f"Time taken: {end}")
        return result

    return wrapper


class WenwuDataset(Dataset):
    """Image–caption dataset over a fractional slice of the global index.

    Each sample is ``(image_tensor, caption, img_id)`` where the tensor is a
    224x224 CLIP-normalized float image (unless a custom ``img_preprocess``
    is supplied, in which case its output is returned as-is).
    """

    def __init__(
        self,
        start_p: float,
        end_p: float,
        memorize_images=False,
        img_preprocess=None,
        device: str = "cpu",
        use_augmentation: bool = True,
    ):
        """Select the [start_p, end_p) fraction of the full sample index.

        Args:
            start_p: fractional start of the slice into the index (0..1).
            end_p: fractional end of the slice into the index (0..1).
            memorize_images: if True, decode images through the process-wide
                get_image cache (keeps decoded images in memory).
            img_preprocess: optional callable replacing the built-in
                transform pipeline (e.g. a CLIP preprocess function).
            device: device the torchvision transform pipeline is moved to.
            use_augmentation: random crop/flip pipeline (training) vs
                deterministic resize + center crop (validation/test).
        """
        super().__init__()
        self.img_preprocess = img_preprocess
        self.memorize_images = memorize_images
        indexing = get_index_data()["index"]
        self.data = indexing[
            math.floor(start_p * len(indexing)) : math.floor(end_p * len(indexing))
        ]
        
        # Pick the transform pipeline depending on whether augmentation is wanted.
        if use_augmentation:
            # Training: random crop and horizontal flip before CLIP normalization.
            self.transform = TV.Compose([
                TV.ToImage(),
                TV.ToDtype(torch.float32, scale=True),
                TV.RandomResizedCrop((224, 224), scale=(0.8, 1.0), ratio=(3/4, 4/3)),
                TV.RandomHorizontalFlip(),
                TV.Normalize(CLIP_MEAN, CLIP_STD),
            ]).to(device)
        else:
            # Validation/test: deterministic resize + center crop, no augmentation.
            self.transform = TV.Compose([
                TV.ToImage(),
                TV.ToDtype(torch.float32, scale=True),
                TV.Resize((224, 224), interpolation=TV.InterpolationMode.BICUBIC),
                TV.CenterCrop(224),
                TV.Normalize(CLIP_MEAN, CLIP_STD),
            ]).to(device)

    def __len__(self):
        """Number of (caption, image) samples in this slice."""
        return len(self.data)

    # @decorator_timer
    def __getitem__(self, idx):
        """Return ``(image_tensor, caption, img_id)`` for sample *idx*."""
        id, image, caption, img_id = self.data[idx]
        if self.memorize_images:
            # Cached path: reuses decoded PIL images across epochs.
            image = get_image(str(image))
        else:
            image = Image.open(image).convert("RGB")

        if self.img_preprocess:
            image_tensor = self.img_preprocess(image)
        else:
            image_tensor = self.transform(image)

        return image_tensor, caption, img_id


def get_train_set(data_scale: float = 1):
    """Training split: the first 80% of the index, shrunk by data_scale."""
    end_p = 0.8 * data_scale
    return WenwuDataset(0, end_p, use_augmentation=True)


def get_val_set(data_scale: float = 1):
    """Validation split: the 80%-90% index range, shrunk by data_scale."""
    end_p = 0.8 + 0.1 * data_scale
    return WenwuDataset(0.8, end_p, use_augmentation=False)


def get_test_set(data_scale: float = 1):
    """Test split: the 90%-100% index range, shrunk by data_scale."""
    end_p = 0.9 + 0.1 * data_scale
    return WenwuDataset(0.9, end_p, use_augmentation=False)


if __name__ == "__main__":
    # Smoke test: build the full dataset and print one sample.
    print("Building captions dataset...")
    sample = WenwuDataset(0, 1)[10]
    print(sample)
