import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
from datasets import CaptionDataset_att_features  # make sure you have this (or a similar) dataset class
import torchvision.transforms as transforms
from value_model import ValueNetwork_features, CNNv_features, ValueNetworkRNN
from reward_model import RewardNetwork_features, GetRewards_features, Encoder_features, SentenceEncoderRNN
from modelAttention import Encoder_features as Encoder_features_att
import json
import torch.backends.cudnn as cudnn
import os
from torch.nn.utils.rnn import pack_padded_sequence

# Parameters
# --- Paths and runtime configuration ---------------------------------------
data_folder = './data'  # folder with data files saved by create_input_files.py
data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files
checkpoint = './BEST_att_checkpoint_coco_5_cap_per_img_5_min_word_freq.pth.tar'  # model checkpoint
word_map_file = './data/WORDMAP_coco_5_cap_per_img_5_min_word_freq.json'  # word map, ensure it's the same the data was encoded with and the model was trained with

# Run on GPU when available; cudnn autotuning helps only with fixed-size inputs.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True

# --- Load the pretrained captioning decoder ---------------------------------
# The checkpoint stores the whole decoder module; it is fine-tuned below with
# policy gradients, hence train() rather than eval().
checkpoint = torch.load(checkpoint)
decoder = checkpoint['decoder'].to(device)
decoder.train()

# Feature encoder used to embed images for the decoder.
encoder = Encoder_features_att().to(device)

# Optimizer for the decoder (the "actor" in the actor-critic setup below).
optimizer_decoder = torch.optim.Adam(decoder.parameters(), lr=0.001)

# Vocabulary: word -> index mapping; must match the encoding used for the data
# and the trained model. The reverse map is kept for decoding indices to words.
with open(word_map_file, 'r') as wm_file:
    word_map = json.load(wm_file)
rev_word_map = {index: word for word, index in word_map.items()}
vocab_size = len(word_map)

# Standard ImageNet channel statistics used to normalize input images.
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225],
)

# --- Training data -----------------------------------------------------------
# Load the TRAIN split. (The original code named these variables "*val*"
# although the 'TRAIN' split is loaded; renamed for clarity.)
full_train_dataset = CaptionDataset_att_features(
    data_folder, data_name, 'TRAIN',
    transform=transforms.Compose([normalize]))

# Use only the first 10% of the samples to keep curriculum training fast.
# Note this is a deterministic prefix slice, not a random subsample.
subset_size = int(0.1 * len(full_train_dataset))
train_subset = torch.utils.data.Subset(full_train_dataset, range(subset_size))

train_loader = DataLoader(train_subset, batch_size=32, shuffle=True,
                          num_workers=0, pin_memory=True)


# --- Value (critic) network: trained jointly with the decoder ----------------
value_network_path = './models/value/best_0.0006.pth'  # Update this path to your actual model path
value_network = torch.load(value_network_path).to(device)
value_network.train()
optimizer_value = torch.optim.Adam(value_network.parameters(), lr=0.001)

# --- Reward network: frozen; only used to score generated captions -----------
reward_network_path = './models/reward/best_0.0022.pth'
reward_network = torch.load(reward_network_path).to(device)
# BUG FIX: the attribute is `requires_grad`; the original wrote
# `param.require_grad = False`, which silently created a new, unused attribute
# and left the reward network's parameters trainable.
for param in reward_network.parameters():
    param.requires_grad = False
# NOTE(review): the reward network is left in train mode here; if it contains
# dropout/batch-norm layers, reward_network.eval() may be intended — confirm.


# --- Curriculum actor-critic training loop -----------------------------------
# For each curriculum level L, the model is given a ground-truth caption with
# its last L tokens removed and must generate those L tokens itself; the value
# network is the critic and the frozen reward network supplies rewards.
curriculum_levels = [2, 4, 6, 8, 10, 12, 14, 16]
os.makedirs('./models/curriculum', exist_ok=True)  # ensure checkpoint dir exists

for level in curriculum_levels:
    best_loss_actor = float('inf')
    best_loss_critic = float('inf')
    for epoch in range(10):  # 10 epochs per curriculum stage
        for i, (images, captions, caplens) in enumerate(train_loader):
            images, captions, caplens = images.to(device), captions.to(device), caplens.to(device)
            episodicAvgLoss = 0
            for episode in range(images.size(0)):  # one rollout per sample
                log_probs = []
                values = []
                rewards = []
                caplen = caplens[episode].item()

                # Skip captions too short for this curriculum level.
                # BUG FIX: the original fell through to torch.stack() on the
                # empty lists below, raising a RuntimeError.
                if caplen - level <= 1:
                    continue

                # Caption prefix (ground truth minus the last `level` tokens)
                # and the corresponding image, both kept batch-shaped (1, ...).
                captions_in = captions[episode:episode + 1, :caplen - level]
                image_in = images[episode:episode + 1]

                for step in range(level):  # generate `level` tokens, one per step
                    features_in = encoder(image_in)  # encode the image
                    caption_length_tensor = torch.tensor([caplen - level], dtype=torch.long).to(device)
                    probs, _, lengths, _, _ = decoder(features_in, captions_in, caption_length_tensor)
                    # NOTE(review): unpacking pack_padded_sequence into two
                    # values and then softmax over dim=2 assumes an old
                    # PackedSequence layout / a 3-D `probs`; confirm against
                    # the decoder's actual output before relying on this.
                    probs, _ = pack_padded_sequence(probs, lengths, batch_first=True)
                    value = value_network(image_in, captions_in, caption_length_tensor)  # critic's state value
                    probs = F.softmax(probs, dim=2)  # logits -> probabilities

                    # Sample the next token from the policy distribution.
                    dist = probs.cpu().detach().numpy()[0, 0]
                    action = np.random.choice(probs.shape[-1], p=dist)

                    # Append the sampled token to the running caption.
                    gen_cap = torch.from_numpy(np.array([action])).unsqueeze(0).to(device)
                    captions_in = torch.cat((captions_in, gen_cap), dim=1)

                    log_prob = torch.log(probs[0, 0, action])  # log pi(a|s)
                    reward = GetRewards_features(image_in, captions_in, caption_length_tensor, reward_network)

                    rewards.append(reward)
                    values.append(value)
                    log_probs.append(log_prob)

                values = torch.stack(values).to(device)
                rewards = torch.stack(rewards).to(device)
                log_probs = torch.stack(log_probs).to(device)

                # NOTE(review): the advantage is not detached for the actor
                # loss, so actor gradients also flow into the value network;
                # advantage.detach() is the conventional choice — confirm intent.
                advantage = values - rewards

                actorLoss = (-log_probs * advantage).mean()  # policy-gradient loss
                if actorLoss.item() < best_loss_actor:
                    # BUG FIX: store the float, not the tensor — the tensor kept
                    # the whole autograd graph alive and made later comparisons
                    # tensor-vs-float.
                    best_loss_actor = actorLoss.item()
                    torch.save(decoder.state_dict(), f'./models/curriculum/best_actor_loss_{level}_at_{actorLoss.item()}.pth')
                criticLoss = 0.5 * advantage.pow(2).mean()  # value regression loss
                if criticLoss.item() < best_loss_critic:
                    best_loss_critic = criticLoss.item()  # BUG FIX: .item(), as above
                    torch.save(value_network.state_dict(), f'./models/curriculum/best_critic_loss_{level}_at_{criticLoss.item()}.pth')

                # Actor update first; keep the graph because criticLoss shares it.
                optimizer_decoder.zero_grad()
                actorLoss.backward(retain_graph=True)
                optimizer_decoder.step()

                # Critic update.
                optimizer_value.zero_grad()
                criticLoss.backward()
                optimizer_value.step()

                episodicAvgLoss += (actorLoss.item() + criticLoss.item()) / images.size(0)

            print(f"Level {level}, Epoch {epoch+1}, Batch {i+1}, Loss: {episodicAvgLoss:.4f}")
