import torchvision.models as models
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import random
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets import CaptionDataset_tra  # Ensure you have this or a similar dataset class available
import os
import json
from tqdm import tqdm
from reward_model import EncoderCNN, SentenceEncoderRNN, RewardNetwork, GetRewards
from value_model import ValueNetwork
from modelTra import EncoderCNN as TraEncoder, DecoderRNN  # Import the modelTra components
import warnings

warnings.filterwarnings("ignore", category=UserWarning)

if __name__ == "__main__":
    # Train a value network to predict the reward of (image, partial caption)
    # pairs, using a frozen reward model for targets and a frozen
    # encoder/decoder (modelTra) to generate the captions being scored.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # ------------------------------------------------------------------
    # Frozen reward model: provides the regression targets for the value net.
    # ------------------------------------------------------------------
    model_save_path = '../lanyun-tmp/models/reward'
    value_save_path = '../lanyun-tmp/models/value'
    os.makedirs(value_save_path, exist_ok=True)  # Create the directory if it doesn't exist
    
    best_model_filename = 'reward_e7_loss0.4134_reward0.7321.pth'  # Update this to your actual best model filename
    best_model_path = os.path.join(model_save_path, best_model_filename)
    # NOTE(review): torch.load on a fully pickled model executes arbitrary
    # code from the checkpoint file — only load checkpoints you trust.
    reward_model = torch.load(best_model_path)
    reward_model = reward_model.to(device)
    for param in reward_model.parameters():
        param.requires_grad = False  # Freeze reward model parameters
    
    # Data configuration
    data_folder = '../lanyun-tmp/data'  # folder with data files saved by create_input_files.py
    data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files
    workers = 10  # DataLoader worker processes
    
    # Load the word map (token -> index) produced by preprocessing; its size
    # defines the value network's vocabulary.
    word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
    with open(word_map_file, 'r') as j:
        word_map = json.load(j)

    # Model configuration
    vocab_size = len(word_map)
    embed_size = 512
    cnn_embed_size = 512
    rnn_hidden_size = 512

    # Value network under training: scores (image, caption-prefix) pairs.
    valueNetwork = ValueNetwork(vocab_size, embed_size, cnn_embed_size, rnn_hidden_size).to(device)
    
    
    # ------------------------------------------------------------------
    # Frozen caption generator (modelTra): produces the captions whose
    # reward the value network learns to predict.
    # ------------------------------------------------------------------
    checkpoint = './BEST_tra_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar' 
    checkpoint = torch.load(checkpoint)
    decoder = checkpoint['decoder']
    decoder = decoder.to(device)
    decoder.eval()
    encoder = checkpoint['encoder']
    encoder = encoder.to(device)
    encoder.eval()
    # Swap the encoder's submodule at index '8' for 7x7 average pooling.
    # NOTE(review): assumes index 8 of encoder.resnet is the final pooling
    # layer — confirm against modelTra's EncoderCNN definition.
    encoder.resnet._modules['8'] = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
    # MSE between predicted value and observed reward.
    criterion = nn.MSELoss().to(device)
    initial_lr = 0.0001
    optimizer = optim.Adam(valueNetwork.parameters(), lr=initial_lr, weight_decay=5e-4)
    
    # Cosine-annealing LR schedule.
    # NOTE(review): T_max=200 while training runs at most 100 epochs, so the
    # LR traverses only half a cosine cycle — confirm this is intended.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=200, eta_min=1e-6
    )

    # Set training parameters
    batch_size_train = 64
    batch_size_val = 64
    best_loss = float('inf')
    max_seq_len = 17  # NOTE(review): appears unused in this script
    v_num_epochs = 100
    best_models = []  # List to track saved model filenames

    # Standard ImageNet channel statistics for normalization.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    
    # Training dataset
    train_loader = DataLoader(
        CaptionDataset_tra(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
        batch_size=batch_size_train, shuffle=True, num_workers=workers, pin_memory=True)
    
    # Validation dataset
    val_dataset = CaptionDataset_tra(data_folder, data_name, 'VAL', transform=transforms.Compose([normalize]))
    
    # Random subset of the validation data (factor 1 currently selects the
    # whole set, but the machinery allows shrinking it).
    subset_size = int(len(val_dataset) *1)
    indices = torch.randperm(len(val_dataset))[:subset_size]
    val_subset = torch.utils.data.Subset(val_dataset, indices)
    
    val_loader = torch.utils.data.DataLoader(
        val_subset,
        batch_size=batch_size_val, shuffle=False, num_workers=workers, pin_memory=True)
    # ------------------------------------------------------------------
    # Training loop
    # ------------------------------------------------------------------

    def _randomly_truncate(full_captions, full_caplens):
        """Cut each caption at a random length in [1, caplen].

        Returns (truncated_captions, truncated_lengths).  Positions past the
        cut are zero-filled.  Used so the value network learns to score
        partial caption prefixes, while the reward target is still computed
        from the full caption.
        """
        rand_lengths = torch.zeros(full_captions.size(0), dtype=torch.long, device=device)
        for j in range(full_captions.size(0)):
            # Guard against zero-length captions so randint's range is valid.
            max_len = full_caplens[j].item() if full_caplens[j].item() > 0 else 1
            rand_lengths[j] = torch.randint(1, max_len + 1, (1,), device=device)

        truncated = torch.zeros_like(full_captions)
        for j in range(full_captions.size(0)):
            truncated[j, :rand_lengths[j]] = full_captions[j, :rand_lengths[j]]

        return truncated, rand_lengths

    # Early-stopping state must persist ACROSS epochs.  (Bug fix: previously
    # `patience` and `no_improvement` were re-initialized inside the epoch
    # loop, so the counter reset every epoch and early stopping never fired.)
    patience = 5
    no_improvement = 0

    for epoch in range(v_num_epochs):
        # ---- Training phase ------------------------------------------------
        valueNetwork.train()
        train_loss = 0.0

        with tqdm(total=len(train_loader), desc=f"Epoch {epoch+1}/{v_num_epochs}") as pbar:
            for i, (images, captions, caplens) in enumerate(train_loader):
                images = images.to(device)
                captions = captions.to(device)
                caplens = caplens.to(device)

                # Generate captions with the frozen encoder/decoder; the value
                # network is trained on model-generated captions, not ground
                # truth.
                with torch.no_grad():
                    features = encoder(images)
                    generated_captions, _, decode_lengths, _ = decoder(features, captions, caplens)
                    # Greedy token indices from the decoder logits.
                    _, predicted_indices = torch.max(generated_captions, dim=2)
                    tra_captions = predicted_indices
                    tra_caplens = torch.tensor(decode_lengths, device=device)

                # After a warm-up, feed the value network randomly truncated
                # prefixes so it learns to score partial captions.
                # NOTE(review): training switches at epoch > 5 but validation
                # switches at epoch > 50 — confirm the two thresholds are
                # meant to differ.
                if epoch > 5:
                    random_captions, updated_caplens = _randomly_truncate(tra_captions, tra_caplens)
                else:
                    random_captions = tra_captions
                    updated_caplens = tra_caplens

                # Regression target: reward of the FULL generated caption.
                with torch.no_grad():
                    rewards = GetRewards(images, tra_captions, tra_caplens, reward_model)

                # Value net sees the (possibly truncated) prefix and must
                # predict the full-caption reward.
                values = valueNetwork(images, random_captions, updated_caplens)
                loss = criterion(values, rewards)

                optimizer.zero_grad()
                loss.backward()
                # Clip gradients to stabilize training.
                torch.nn.utils.clip_grad_norm_(valueNetwork.parameters(), max_norm=1.0)
                optimizer.step()

                train_loss += loss.item()
                pbar.set_postfix(Loss=loss.item())
                pbar.update(1)

        avg_train_loss = train_loss / len(train_loader)

        # ---- Validation phase ----------------------------------------------
        valueNetwork.eval()
        val_loss = 0.0
        with torch.no_grad():
            # The VAL split yields a 4th element (presumably all reference
            # captions) that is not needed here.
            for images, captions, caplens, _ in val_loader:
                images = images.to(device)
                captions = captions.to(device)
                caplens = caplens.to(device)

                features = encoder(images)
                generated_captions, _, decode_lengths, _ = decoder(features, captions, caplens)
                _, predicted_indices = torch.max(generated_captions, dim=2)
                tra_captions = predicted_indices
                tra_caplens = torch.tensor(decode_lengths, device=device)

                if epoch > 50:
                    random_captions, updated_caplens = _randomly_truncate(tra_captions, tra_caplens)
                else:
                    random_captions = tra_captions
                    updated_caplens = tra_caplens

                rewards = GetRewards(images, tra_captions, tra_caplens, reward_model)
                values = valueNetwork(images, random_captions, updated_caplens)
                val_loss += criterion(values, rewards).item()

        avg_val_loss = val_loss / len(val_loader)
        print(f"Epoch {epoch+1}: Train Loss = {avg_train_loss:.8f}, Val Loss = {avg_val_loss:.8f}")

        # Update learning rate
        scheduler.step()

        # ---- Checkpointing --------------------------------------------------
        improved = avg_val_loss < best_loss
        if improved:
            best_loss = avg_val_loss

            model_filename = f"best_{best_loss:.8f}.pth"
            torch.save(valueNetwork, os.path.join(value_save_path, model_filename))
            best_models.append(model_filename)

            # Keep only the five most recent best checkpoints on disk.
            if len(best_models) > 5:
                oldest_model = best_models.pop(0)
                os.remove(os.path.join(value_save_path, oldest_model))

            print(f"Saved {model_filename} with validation loss {best_loss:.8f} (Keeping {len(best_models)} models)")

        # ---- Early stopping --------------------------------------------------
        # Only considered after a 10-epoch grace period; stops after
        # `patience` consecutive epochs without a new best validation loss.
        if epoch > 10:
            if not improved:
                no_improvement += 1
                if no_improvement >= patience:
                    print(f"Early stopping at epoch {epoch+1}")
                    break
            else:
                no_improvement = 0
