import argparse
import os
from typing import List, Tuple

import torch
from PIL import Image
from torch.utils.data import DataLoader
from transformers import AdamW, CLIPModel, CLIPProcessor, get_scheduler


class CLIPTrainer:
    def __init__(
        self, model_name: str = "openai/clip-vit-base-patch32", device: str = None, local_model_path: str = None
    ):
        """Initialize the CLIP trainer.

        Args:
            model_name: Hugging Face identifier of the pretrained CLIP model.
            device: Training device ("cuda"/"cpu"); auto-detects GPU when None.
            local_model_path: Optional local checkpoint directory; used instead
                of model_name when it exists on disk.
        """
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")

        # Prefer the local checkpoint only when it actually exists; otherwise
        # fall back to loading by name (hub download or cache).
        model_path = local_model_path if local_model_path and os.path.exists(local_model_path) else model_name

        self.model = CLIPModel.from_pretrained(model_path).to(self.device)
        self.processor = CLIPProcessor.from_pretrained(model_path)

    def load_dataset(
        self, data_dir: str, image_exts: List[str] = None
    ) -> List[Tuple[str, str]]:
        """Recursively collect (image_path, text_label) pairs from a directory.

        Args:
            data_dir: Root directory to walk.
            image_exts: Accepted image extensions, matched case-insensitively.
                Defaults to [".jpg", ".png", ".jpeg"].

        Returns:
            List of (image path, text label) tuples.

        Label resolution, in priority order:
        1. Contents of a same-stem ``.txt`` file next to the image.
        2. The image file name without its extension.
        """
        # None sentinel instead of a mutable default argument (which would be
        # shared across calls).
        if image_exts is None:
            image_exts = [".jpg", ".png", ".jpeg"]
        # Lowercase once; str.endswith accepts a tuple of suffixes.
        exts = tuple(ext.lower() for ext in image_exts)

        dataset = []
        for root, _, files in os.walk(data_dir):
            for file in files:
                if file.lower().endswith(exts):
                    image_path = os.path.join(root, file)
                    # A sibling .txt file with the same stem wins over the
                    # file-name label.
                    txt_path = os.path.splitext(image_path)[0] + ".txt"
                    if os.path.exists(txt_path):
                        with open(txt_path, "r", encoding="utf-8") as f:
                            text_label = f.read().strip()
                    else:
                        # Fall back to the file name (without extension).
                        text_label = os.path.splitext(file)[0]
                    dataset.append((image_path, text_label))
        return dataset

    def train(
        self,
        dataset: List[Tuple[str, str]],
        output_dir: str,
        batch_size: int = 8,
        num_epochs: int = 3,
        learning_rate: float = 5e-5,
        save_steps: int = 100,
    ):
        """Fine-tune the CLIP model with a symmetric contrastive loss.

        Args:
            dataset: Training pairs as produced by load_dataset().
            output_dir: Directory that receives checkpoint subdirectories.
            batch_size: Batch size.
            num_epochs: Number of epochs.
            learning_rate: Optimizer learning rate.
            save_steps: Save a checkpoint every this many optimizer steps.
        """
        # Data loader; _collate_fn turns (path, text) pairs into model inputs.
        train_dataloader = DataLoader(
            dataset, batch_size=batch_size, shuffle=True, collate_fn=self._collate_fn
        )

        # torch.optim.AdamW rather than transformers.AdamW: the latter is
        # deprecated and removed in recent transformers releases.
        optimizer = torch.optim.AdamW(self.model.parameters(), lr=learning_rate)
        lr_scheduler = get_scheduler(
            "linear",
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=len(train_dataloader) * num_epochs,
        )

        # Training loop
        self.model.train()
        global_step = 0
        for epoch in range(num_epochs):
            for batch in train_dataloader:
                # Forward pass
                image_emb = self.model.get_image_features(**batch["image_inputs"])
                text_emb = self.model.get_text_features(**batch["text_inputs"])

                # L2-normalize so the dot products below are cosine
                # similarities, as in the CLIP objective.
                image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
                text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)

                # Symmetric InfoNCE loss (image->text and text->image);
                # temperature 0.07 matches CLIP's logit-scale initialization.
                logits = (image_emb @ text_emb.T) / 0.07
                labels = torch.arange(logits.size(0), device=self.device)
                loss = (
                    torch.nn.functional.cross_entropy(logits, labels)
                    + torch.nn.functional.cross_entropy(logits.T, labels)
                ) / 2

                # Backward pass; clear gradients before accumulating new ones.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr_scheduler.step()

                global_step += 1
                if global_step % save_steps == 0:
                    self._save_model(output_dir, global_step)

            # Save once per completed epoch as well.
            self._save_model(output_dir, f"epoch_{epoch}")

    def _collate_fn(self, batch):
        """Collate (image_path, text) pairs into processor-ready tensors."""
        image_paths, texts = zip(*batch)
        images = []
        for path in image_paths:
            # Decode eagerly and close the underlying file handle so long
            # training runs do not exhaust file descriptors.
            with Image.open(path) as img:
                images.append(img.convert("RGB"))
        image_inputs = self.processor(images=images, return_tensors="pt").to(
            self.device
        )
        # padding/truncation are required: captions in a batch differ in
        # length, and without them return_tensors="pt" raises.
        text_inputs = self.processor(
            text=list(texts), padding=True, truncation=True, return_tensors="pt"
        ).to(self.device)
        return {"image_inputs": image_inputs, "text_inputs": text_inputs}

    def _save_model(self, output_dir: str, suffix: str = ""):
        """Save model and processor to output_dir/clip_model_<suffix>."""
        save_path = os.path.join(output_dir, f"clip_model_{suffix}")
        self.model.save_pretrained(save_path)
        self.processor.save_pretrained(save_path)
        print(f"Model saved to {save_path}")


def main():
    """CLI entry point: parse arguments, build the trainer, and run training."""
    parser = argparse.ArgumentParser(description="CLIP模型训练脚本")
    parser.add_argument(
        "--data_dir", "-dd", type=str, required=True, help="训练数据目录"
    )
    parser.add_argument(
        "--output_dir", "-od", type=str, required=True, help="模型输出目录"
    )
    parser.add_argument(
        "--model_name",
        "-mn",
        type=str,
        default="openai/clip-vit-base-patch32",
        help="预训练模型名称",
    )
    parser.add_argument(
        "--local_model_path",
        type=str,
        default=None,
        help="本地模型路径（可选）",
    )
    parser.add_argument("--batch_size", "-bs", type=int, default=8, help="训练批次大小")
    parser.add_argument("--epochs", "-ep", type=int, default=3, help="训练轮数")
    parser.add_argument("--lr", type=float, default=5e-5, help="学习率")
    # Expose the checkpoint interval that train() already supports; default
    # matches the method's own default so existing invocations are unchanged.
    parser.add_argument(
        "--save_steps", "-ss", type=int, default=100, help="保存间隔步数"
    )
    args = parser.parse_args()

    # Create the output directory up front (no-op when it already exists).
    os.makedirs(args.output_dir, exist_ok=True)

    # Training pipeline: build trainer, load data, train.
    trainer = CLIPTrainer(args.model_name, local_model_path=args.local_model_path)
    dataset = trainer.load_dataset(args.data_dir)
    trainer.train(
        dataset=dataset,
        output_dir=args.output_dir,
        batch_size=args.batch_size,
        num_epochs=args.epochs,
        learning_rate=args.lr,
        save_steps=args.save_steps,
    )


if __name__ == "__main__":
    main()
