import os
import argparse
import torch
import torch.nn as nn
from torchvision import transforms
from timm import create_model
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import PIL


# -----------------------------
# 1. Argument parsing
# -----------------------------
def parse_args():
    """Parse the command-line options for the MAE-ViT inference run."""
    ap = argparse.ArgumentParser(description='MAE-ViT Inference Script')

    # Required paths.
    ap.add_argument('--data_path', type=str, required=True,
                    help='Path to validation dataset root (文件夹结构如 1/, 2/, 3/...)')
    ap.add_argument('--model', type=str, default='vit_base_patch16_224',
                    help='Model name, e.g., vit_base_patch16_224')
    ap.add_argument('--num_classes', type=int, default=50,
                    help='Total number of classes (even if some folders are empty)')
    ap.add_argument('--checkpoint', type=str, required=True,
                    help='Path to fine-tuned weights (.pth)')

    # Runtime knobs.
    ap.add_argument('--batch_size', type=int, default=32)
    default_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    ap.add_argument('--device', type=str, default=default_device)
    ap.add_argument('--save_csv', type=str, default=None,
                    help='Optional: path to save inference results as CSV')
    ap.add_argument('--input_size', type=int, default=224,
                    help='Input image size (default: 224, same as MAE)')

    return ap.parse_args()


# -----------------------------
# 2. Data loading (compatible with MAE validation preprocessing)
# -----------------------------
class SafeImageDataset(Dataset):
    """Image dataset indexed from ``root/<class_name>/`` folders.

    Unlike torchvision's ``ImageFolder``, the class list is supplied
    explicitly (``all_classes``) so label indices stay stable even when
    some class folders are missing or empty.

    Args:
        root: dataset root directory containing one folder per class.
        transform: optional callable applied to each loaded PIL image.
        all_classes: ordered class-folder names; position defines the label.

    Raises:
        RuntimeError: if no valid image file is found under ``root``.
    """

    # Accepted image extensions (matched case-insensitively).
    VALID_EXTS = ('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff', '.webp')

    def __init__(self, root, transform=None, all_classes=None):
        self.root = root
        self.transform = transform
        self.samples = []  # list of (file_path, label_index) pairs
        self.class_names = all_classes or []

        # enumerate() gives the label directly; the original
        # class_names.index(cls_name) lookup was O(n) per file.
        for label, cls_name in enumerate(self.class_names):
            cls_dir = os.path.join(root, cls_name)
            if not os.path.isdir(cls_dir):
                continue  # tolerate missing class folders
            # sorted() makes sample order deterministic across platforms
            # (os.listdir order is unspecified).
            for fname in sorted(os.listdir(cls_dir)):
                if fname.lower().endswith(self.VALID_EXTS):
                    self.samples.append((os.path.join(cls_dir, fname), label))

        if len(self.samples) == 0:
            raise RuntimeError(f"No valid images found in {root}")

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        """Return (image_tensor_or_pil, label) for sample ``idx``."""
        path, label = self.samples[idx]
        img = Image.open(path).convert('RGB')
        if self.transform:
            img = self.transform(img)
        return img, label


def build_dataloader(data_path, batch_size, input_size=224):
    """Create a validation DataLoader using MAE's eval preprocessing.

    Mirrors the transform built in MAE's util/datasets.py (and used by
    main_finetune.py): resize to ``input_size / crop_pct`` with bicubic
    interpolation, center-crop, convert to tensor, normalize with
    ImageNet statistics.

    Returns:
        (dataloader, all_classes) where ``all_classes`` is the sorted
        list of class-folder names found under ``data_path`` — sorting
        keeps label indices consistent with MAE training.
    """
    from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

    # MAE uses crop_pct = 224/256 (0.875) for inputs up to 224px, 1.0 above.
    crop_pct = 224 / 256 if input_size <= 224 else 1.0
    resize_size = int(input_size / crop_pct)  # e.g. int(224 / 0.875) = 256

    transform = transforms.Compose([
        transforms.Resize(resize_size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
    ])

    # String-sorted folder names, matching MAE's class ordering.
    all_classes = sorted(
        entry for entry in os.listdir(data_path)
        if os.path.isdir(os.path.join(data_path, entry))
    )

    dataset = SafeImageDataset(data_path, transform, all_classes)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    return loader, all_classes


# -----------------------------
# 3. Model loading (MAE-checkpoint compatible)
# -----------------------------
def load_model(model_name, num_classes, checkpoint_path, device, input_size=224):
    """Build a timm ViT and load MAE fine-tuned weights onto it.

    Follows the checkpoint-loading logic of MAE's main_finetune.py:
    drops a shape-mismatched classification head and interpolates the
    position embedding when the input resolution differs from the
    checkpoint's.

    Args:
        model_name: timm model identifier, e.g. 'vit_base_patch16_224'.
        num_classes: size of the classification head.
        checkpoint_path: path to the fine-tuned ``.pth`` file.
        device: torch device string the model is moved to.
        input_size: input resolution the model is built for.

    Returns:
        The model in ``eval()`` mode on ``device``.
    """
    print(f"🧠 Creating model {model_name} ({num_classes} classes)...")
    model = create_model(model_name, pretrained=False, num_classes=num_classes, img_size=input_size)

    # MAE checkpoints embed an argparse.Namespace in their metadata;
    # allow-list it so torch.load accepts the file. (argparse is already
    # imported at module level — no local re-import needed.)
    torch.serialization.add_safe_globals([argparse.Namespace])

    # NOTE(review): weights_only=False executes arbitrary pickled code —
    # only load checkpoints from trusted sources.
    checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)

    print("Load pre-trained checkpoint from: %s" % checkpoint_path)
    # Accept both {'model': state_dict, ...} checkpoints and raw state_dicts.
    checkpoint_model = checkpoint['model'] if 'model' in checkpoint else checkpoint

    # Drop the classification head if its shape does not match ours.
    state_dict = model.state_dict()
    for k in ['head.weight', 'head.bias']:
        if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
            print(f"Removing key {k} from pretrained checkpoint")
            del checkpoint_model[k]

    # Interpolate position embeddings when the patch grid size changed
    # (logic from MAE's main_finetune.py). Guarded: some checkpoints may
    # not carry a 'pos_embed' entry at all.
    if 'pos_embed' in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) of the checkpoint position-embedding grid
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) of the new position-embedding grid
        new_size = int(num_patches ** 0.5)
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            # class/dist tokens are kept unchanged; only patch tokens resize
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            checkpoint_model['pos_embed'] = torch.cat((extra_tokens, pos_tokens), dim=1)

    # strict=False: tolerate the deliberately-removed head keys above.
    msg = model.load_state_dict(checkpoint_model, strict=False)
    print("✅ Loaded checkpoint:", msg)

    model.to(device)
    model.eval()
    return model


# -----------------------------
# 4. Inference (accuracy computed as in MAE)
# -----------------------------
def run_inference(model, dataloader, device, class_names):
    """Run batched inference and report overall top-1 accuracy.

    Args:
        model: classifier in ``eval()`` mode; invoked as ``model(images)``.
        dataloader: yields ``(images, labels)`` tensor batches.
        device: device string batches are moved to.
        class_names: ordered class-folder names (for the summary print).

    Returns:
        ``(all_preds, all_labels)`` as flat Python int lists.
    """
    # The original version also called timm.utils.accuracy(topk=(1, 5))
    # per batch, but discarded the result — dead work that additionally
    # crashes when the model has fewer than 5 classes. It also kept all
    # raw logits in memory (all_outputs) without ever using them. Both
    # removed; the reported metric below is unchanged.
    all_preds, all_labels = [], []

    with torch.no_grad():
        for images, labels in tqdm(dataloader, desc="Inferencing"):
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            preds = outputs.argmax(dim=1)

            all_preds.extend(preds.cpu().tolist())
            all_labels.extend(labels.cpu().tolist())

    # Top-1 accuracy over the whole set (same value MAE's validation
    # loop reports); guard against an empty loader.
    correct = sum(p == t for p, t in zip(all_preds, all_labels))
    acc = correct / len(all_labels) if all_labels else 0.0
    print(f"\n✅ Inference finished!")
    print(f"Top-1 Accuracy = {acc * 100:.2f}% ({correct}/{len(all_labels)})")
    print("Detected class folders:", class_names)

    return all_preds, all_labels


# -----------------------------
# 5. Main entry point
# -----------------------------
def main():
    """CLI entry point: build the loader, load weights, infer, save CSV."""
    args = parse_args()

    loader, class_names = build_dataloader(args.data_path, args.batch_size, args.input_size)
    model = load_model(args.model, args.num_classes, args.checkpoint, args.device, args.input_size)

    preds, labels = run_inference(model, loader, args.device, class_names)

    # Map out-of-range prediction indices to -1: the model head may be
    # wider than the number of class folders actually present on disk.
    n_classes = len(class_names)
    safe_preds = [p if p < n_classes else -1 for p in preds]

    if args.save_csv:
        import pandas as pd
        records = {
            'filename': [sample[0] for sample in loader.dataset.samples],
            'true_label': [class_names[t] if t < n_classes else "unknown" for t in labels],
            'pred_label': [class_names[p] if p != -1 else "out_of_range" for p in safe_preds],
            'true_idx': labels,
            'pred_idx': preds,
        }
        pd.DataFrame(records).to_csv(args.save_csv, index=False)
        print(f"📁 Results saved to {args.save_csv}")


if __name__ == "__main__":
    main()