"""
Simple training script for DE-CN-CLIP
"""
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import AdamW
import sys
sys.path.append('../dataset')
from wenwu_dataset import WenwuDataset
from model import create_decnclip_model
import argparse
from tqdm import tqdm


def train_epoch(model, dataloader, optimizer, device):
    """Train for one epoch"""
    model.train()
    total_loss = 0
    
    for batch in tqdm(dataloader, desc="Training"):
        images, texts = batch
        images = images.to(device)
        
        # Tokenize Chinese texts
        text_inputs = model.tokenizer(
            texts, 
            return_tensors="pt", 
            padding=True, 
            truncation=True, 
            max_length=77
        ).to(device)
        
        optimizer.zero_grad()
        loss = model.compute_loss(images, text_inputs)
        loss.backward()
        optimizer.step()
        
        total_loss += loss.item()
    
    return total_loss / len(dataloader)


def evaluate(model, dataloader, device):
    """Simple evaluation"""
    model.eval()
    total_loss = 0
    
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            images, texts = batch
            images = images.to(device)
            
            text_inputs = model.tokenizer(
                texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=77
            ).to(device)
            
            loss = model.compute_loss(images, text_inputs)
            total_loss += loss.item()
    
    return total_loss / len(dataloader)


def main():
    """Parse CLI args, build data and model, train, and checkpoint each epoch."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-scale', type=float, default=0.1, help='Scale of data to use')
    parser.add_argument('--epochs', type=int, default=5, help='Number of epochs')
    parser.add_argument('--batch-size', type=int, default=32, help='Batch size')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Split boundaries scale with --data-scale: training covers
    # [0, 0.8*scale), validation covers [0.8*scale, 0.81*scale).
    scale = args.data_scale
    train_dataset = WenwuDataset(0, 0.8 * scale)
    val_dataset = WenwuDataset(0.8 * scale, 0.81 * scale)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    print(f"Train samples: {len(train_dataset)}, Val samples: {len(val_dataset)}")

    # Model and optimizer.
    model = create_decnclip_model().to(device)
    optimizer = AdamW(model.parameters(), lr=args.lr)

    for epoch in range(args.epochs):
        print(f"\nEpoch {epoch+1}/{args.epochs}")

        train_loss = train_epoch(model, train_loader, optimizer, device)
        val_loss = evaluate(model, val_loader, device)

        print(f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}")

        # Persist a full checkpoint (model + optimizer state) every epoch.
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss,
        }
        torch.save(checkpoint, f'checkpoint_epoch_{epoch}.pt')

    print("Training completed!")


if __name__ == "__main__":
    main()