import argparse
import logging
import os
import random
from functools import partial
from typing import Any, Dict, List, Tuple

import numpy as np
import torch
import torchvision.transforms as T
from torch.optim import AdamW
from torch.utils.data import Dataset
from transformers import Trainer, TrainingArguments
from PIL import Image
from janus.models import MultiModalityCausalLM, VLChatProcessor

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Argument parsing
def parse_args(argv=None):
    """Parse command-line arguments for training.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``;
            passing an explicit list makes the function usable (and testable)
            without mutating ``sys.argv``.

    Returns:
        argparse.Namespace with the training configuration.
    """
    parser = argparse.ArgumentParser(description="Train Multi-modal Model")
    parser.add_argument("--data_dir", type=str, default="dataset/images")
    parser.add_argument("--pretrained_model", type=str, default="./pretrained/Janus-Pro-7B")
    parser.add_argument("--output_dir", type=str, default="./output")
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--lr", type=float, default=3e-4)
    parser.add_argument("--seed", type=int, default=42)
    # Number of new "<grid_i>" tokens added to the tokenizer in main().
    parser.add_argument("--num_token", type=int, default=14)
    return parser.parse_args(argv)

# Set random seed
def set_seed(seed):
    """Seed every RNG source used here (Python, NumPy, Torch CPU and CUDA)."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

# Image-text pair dataset
class MultiModalDataset(Dataset):
    """Image-caption dataset built from a flat directory.

    Pairs every image file (.jpg/.jpeg/.png) with a same-named ``.txt``
    sidecar holding its caption; images without a caption file are skipped.
    Each sample is ``{"image_tensor": FloatTensor, "text": str}``.
    """

    IMG_EXTS = {".jpg", ".jpeg", ".png"}

    def __init__(self, data_dir):
        self.pairs = []
        # sorted() gives a deterministic sample order regardless of the
        # filesystem's os.listdir ordering (matters for reproducible runs).
        for file in sorted(os.listdir(data_dir)):
            base, ext = os.path.splitext(file)
            if ext.lower() not in self.IMG_EXTS:
                continue
            txt_path = os.path.join(data_dir, base + ".txt")
            if not os.path.exists(txt_path):
                continue
            with open(txt_path, 'r', encoding='utf-8') as f:
                text = f.read().strip()
            self.pairs.append((os.path.join(data_dir, file), text))
        # Transform is built once on first use instead of being re-created on
        # every __getitem__ call (the original rebuilt the pipeline per item).
        self._transform = None

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, idx):
        if self._transform is None:
            self._transform = T.Compose([
                T.Resize((224, 224)),
                T.RandomHorizontalFlip(),
                T.ToTensor(),
            ])
        img_path, txt = self.pairs[idx]
        img = Image.open(img_path).convert("RGB")
        return {"image_tensor": self._transform(img), "text": txt}

# Collate function to use image tensors directly
def collate_fn(batch, processor):
    """Collate image/text samples into one batched model input.

    Builds a user/assistant conversation per sample (with an image
    placeholder), lets the processor tokenize and batch everything, and
    derives ``labels`` from ``input_ids``.

    Args:
        batch: List of ``{"image_tensor": Tensor, "text": str}`` samples.
        processor: VLChatProcessor used for tokenization/batching.

    Returns:
        Dict of batched tensors including ``labels``.
    """
    images = [item["image_tensor"] for item in batch]
    texts = [item["text"] for item in batch]

    conversations = []
    for text in texts:
        conversations.extend([
            {"role": "<|User|>", "content": "<image_placeholder>\n描述图片内容。"},
            {"role": "<|Assistant|>", "content": text},
        ])

    encoded = processor(conversations=conversations, images=images, return_tensors="pt", force_batchify=True)
    labels = encoded["input_ids"].clone()
    # Mask padding positions so the LM loss ignores them; a plain clone of
    # input_ids would train the model to predict pad tokens.
    pad_id = processor.tokenizer.pad_token_id
    if pad_id is not None:
        labels[labels == pad_id] = -100  # -100 == CrossEntropyLoss ignore_index
    encoded["labels"] = labels
    return dict(encoded)

# Add new tokens to tokenizer and resize embeddings
def add_tokens(model, processor, new_tokens):
    """Register new tokens and make ONLY their embedding rows trainable.

    Adds ``new_tokens`` to the tokenizer, resizes the language model's input
    embeddings, freezes every model parameter, then re-enables gradients on
    the embedding matrix while a gradient hook zeroes the gradient of every
    row except the newly added tokens'.

    Why the hook: ``requires_grad`` is a per-tensor flag in PyTorch. The
    original ``embedding.weight[idx].requires_grad = True`` indexes the leaf
    weight, producing a non-leaf view, and setting requires_grad on a
    non-leaf raises RuntimeError — a single row cannot be unfrozen that way.
    """
    num_new_tokens = processor.tokenizer.add_tokens(new_tokens)
    if num_new_tokens > 0:
        model.language_model.resize_token_embeddings(len(processor.tokenizer))

    # Freeze everything; only the new embedding rows should receive updates.
    for param in model.parameters():
        param.requires_grad = False

    embedding = model.language_model.get_input_embeddings()
    new_ids = [processor.tokenizer.convert_tokens_to_ids(t) for t in new_tokens]

    # Unfreeze the whole (leaf) weight tensor, then mask gradients so that
    # optimizer steps only ever change the new tokens' rows.
    embedding.weight.requires_grad = True
    grad_mask = torch.zeros(
        embedding.weight.shape[0], 1,
        dtype=embedding.weight.dtype, device=embedding.weight.device,
    )
    grad_mask[new_ids] = 1.0
    embedding.weight.register_hook(lambda grad: grad * grad_mask.to(grad.device))

# Main training script
def main():
    """Entry point: build the dataset and model, register grid tokens, and
    fine-tune with the HF Trainer so only the new token embeddings train."""
    args = parse_args()
    set_seed(args.seed)

    dataset = MultiModalDataset(args.data_dir)

    processor = VLChatProcessor.from_pretrained(args.pretrained_model)
    # NOTE(review): weights load in fp16 with device_map="auto" while the
    # Trainer below also runs with fp16=True; HF's fp16 path normally expects
    # fp32 master weights and can fail with "Attempting to unscale FP16
    # gradients" on a model loaded in float16 — confirm this combination.
    model = MultiModalityCausalLM.from_pretrained(args.pretrained_model, torch_dtype=torch.float16, device_map="auto")

    # Define new tokens (grid tokens): "<grid_0>" ... "<grid_{num_token-1}>".
    # add_tokens() is intended to freeze the model except these tokens' rows.
    new_tokens = [f"<grid_{i}>" for i in range(args.num_token)]
    add_tokens(model, processor, new_tokens)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        learning_rate=args.lr,
        fp16=True,
        save_strategy="epoch",
        logging_steps=10,
        # Keep the dataset's custom keys ("image_tensor", "text") so the
        # collator receives them; Trainer would otherwise drop them.
        remove_unused_columns=False,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        data_collator=partial(collate_fn, processor=processor),
        # Custom AdamW over only the trainable parameters; the None slot lets
        # the Trainer create its default LR scheduler.
        optimizers=(AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr), None),
    )

    trainer.train()
    # Persist both model weights and the processor/tokenizer (which now holds
    # the added grid tokens) so the output directory is self-contained.
    trainer.save_model(args.output_dir)
    processor.save_pretrained(args.output_dir)

if __name__ == "__main__":
    main()  # run only when executed as a script, not on import
