import torchvision.models as models
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import random
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets import CaptionDataset_tra  # Ensure you have this or a similar dataset class available
import os
import json
from tqdm import tqdm
from reward_model import EncoderCNN,SentenceEncoderRNN,RewardNetwork,GetRewards
from value_model import ValueNetwork



if __name__ == "__main__":
    # Use the GPU when available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Paths for the pretrained reward model and for value-network checkpoints.
    model_save_path = './RL/model/reward'
    value_save_path = './RL/model/value'
    best_model_filename = 'best_0.0635.pth'  # Update this to your actual best model filename
    best_model_path = os.path.join(model_save_path, best_model_filename)

    # Load the frozen reward model. It only supplies training targets, so its
    # parameters must be excluded from gradient computation.
    # NOTE(review): torch.load of a whole pickled model requires the original
    # class definitions to be importable — confirm reward_model was saved that way.
    reward_model = torch.load(best_model_path)
    reward_model = reward_model.to(device)
    for param in reward_model.parameters():
        # BUG FIX: the attribute is `requires_grad`; the original `require_grad`
        # silently created a new attribute and left the reward model trainable.
        param.requires_grad = False

    # ---------------- Dataset configuration ----------------
    data_folder = './epfs'  # folder with data files saved by create_input_files.py
    data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files
    workers = 48
    print(f"create{workers}workers")

    # Load the word map (token -> index) produced during preprocessing.
    word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
    with open(word_map_file, 'r') as fp:
        word_map = json.load(fp)
    vocab_size = len(word_map)

    # ---------------- Value network ----------------
    # All embedding / hidden dimensions share the same width.
    embed_size = cnn_embed_size = rnn_hidden_size = 512
    valueNetwork = ValueNetwork(vocab_size, embed_size, cnn_embed_size, rnn_hidden_size).to(device)

    # Wrap both networks for multi-GPU data parallelism when more than one GPU is visible.
    if torch.cuda.device_count() > 1:
        reward_model = nn.DataParallel(reward_model)
        valueNetwork = nn.DataParallel(valueNetwork)
        print(f"Activated {torch.cuda.device_count()} GPUs!")

    # ---------------- Loss, optimizer, LR schedule ----------------
    criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(valueNetwork.parameters(), lr=1e-4, weight_decay=1e-4)
    # Halve the learning rate once the epoch loss plateaus for 2 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True)

    valueNetwork.train()

    # ---------------- Training hyper-parameters ----------------
    batch_size = 64
    best_loss = float('inf')
    max_seq_len = 17          # NOTE(review): unused in this script — kept for reference
    value_losses = []         # per-epoch average losses (history)
    v_num_epochs = 20
    best_models = []          # rolling list of saved checkpoint filenames
    patience = 5              # early-stopping patience (epochs without improvement)
    patience_counter = 0      # epochs elapsed since the last improvement
    last_loss = float('inf')  # NOTE(review): unused — early stopping compares against best_loss

    # Gradient-clipping threshold (clipping disabled when <= 0).
    grad_clip = 5.0

    # ---------------- Data pipeline ----------------
    # ImageNet channel statistics; assumes the dataset yields tensors these apply to.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_loader = DataLoader(
        CaptionDataset_tra(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
        batch_size=batch_size*2, shuffle=True, num_workers=workers*2, pin_memory=True,
        # Keep worker processes alive between epochs to avoid respawn overhead.
        persistent_workers=workers > 0)

    # ---------------- Training loop ----------------
    # Ensure the checkpoint directory exists before the first save;
    # torch.save does not create intermediate directories.
    os.makedirs(value_save_path, exist_ok=True)

    for epoch in range(v_num_epochs):
        epoch_loss = 0.0
        with tqdm(total=len(train_loader), desc=f"Epoch {epoch+1}/{v_num_epochs}") as pbar:
            for i, (images, captions, caplens) in enumerate(train_loader):
                images = images.to(device, non_blocking=True)
                captions = captions.to(device, non_blocking=True)
                caplens = caplens.to(device, non_blocking=True)

                # With 10% probability add small Gaussian noise to the images
                # to improve robustness.
                if random.random() < 0.1:
                    noise = torch.randn_like(images) * 0.01
                    images = images + noise

                # Target values: rewards produced by the frozen reward model.
                rewards = GetRewards(images, captions, caplens, reward_model)

                # Sample a random prefix length per caption so the value network
                # learns to score partial sequences.
                # FIX: inner loop index renamed from `i` to `j` — the original
                # shadowed (and clobbered) the batch index of the enclosing loop.
                random_lengths = torch.zeros(captions.size(0), dtype=torch.long)
                for j in range(captions.size(0)):
                    # min(2, caplen) CAPS the lower bound at the caption length,
                    # so randint's range [min_len, caplen] is never empty even
                    # for captions shorter than 2 tokens.
                    min_len = min(2, caplens[j].item())
                    random_lengths[j] = torch.randint(min_len, caplens[j].item() + 1, (1,))

                # Zero-padded copies of the captions, truncated to the sampled lengths.
                random_captions = torch.zeros_like(captions)
                for j in range(captions.size(0)):
                    random_captions[j, :random_lengths[j]] = captions[j, :random_lengths[j]]

                updated_caplens = random_lengths

                values = valueNetwork(images, random_captions, updated_caplens)

                # L1 regularisation over all parameters to encourage sparsity.
                l1_lambda = 1e-5
                l1_norm = sum(p.abs().sum() for p in valueNetwork.parameters())

                # Total loss = MSE against rewards + L1 penalty.
                mse_loss = criterion(values, rewards)
                loss = mse_loss + l1_lambda * l1_norm

                # Backpropagation.
                optimizer.zero_grad()
                loss.backward()

                # Clip gradient norm to prevent exploding gradients.
                if grad_clip > 0:
                    nn.utils.clip_grad_norm_(valueNetwork.parameters(), grad_clip)

                optimizer.step()

                epoch_loss += loss.item()
                pbar.set_postfix(Loss=loss.item())
                pbar.update(1)

        # Average loss over all batches of this epoch.
        avg_epoch_loss = epoch_loss / len(train_loader)
        value_losses.append(avg_epoch_loss)
        print(f"Epoch {epoch+1} average loss: {avg_epoch_loss:.6f}")

        # Let the scheduler reduce the LR when the loss stops improving.
        scheduler.step(avg_epoch_loss)

        # Early stopping with rolling best-checkpoint management.
        if avg_epoch_loss < best_loss:
            best_loss = avg_epoch_loss
            patience_counter = 0

            # Save state dicts (not the whole pickled model): robust to code changes.
            model_filename = f"best_{best_loss:.6f}.pth"
            checkpoint = {
                'model_state_dict': valueNetwork.module.state_dict() if isinstance(valueNetwork, nn.DataParallel) else valueNetwork.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch,
                'best_loss': best_loss
            }
            torch.save(checkpoint, os.path.join(value_save_path, model_filename))
            print(f"checkpoint saved:{model_filename}, loss {best_loss:.6f}")

            # Track saved checkpoints and keep only the five most recent on disk.
            best_models.append(model_filename)
            if len(best_models) > 5:
                oldest_model = best_models.pop(0)
                os.remove(os.path.join(value_save_path, oldest_model))
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f"Early stopping triggered after {epoch+1} epochs")
                break
