import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets import *
import torchvision.models as models
from tqdm import tqdm
import os  # Ensure os is imported at the top of your file
from reward_model import RewardNetwork,EncoderCNN,SentenceEncoderRNN,GetRewards
from torch.cuda.amp import autocast, GradScaler

if __name__ == "__main__":

    # Explicit import: `json` was previously only available because
    # `from datasets import *` happens to re-export it — fragile, so
    # import it directly here.
    import json

    # ----- Run configuration -------------------------------------------------
    batch_size_train = 256
    batch_size_val = 256
    workers = 10  # for data-loading; right now, only 1 works with h5py
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # sets device for model and PyTorch tensors

    data_folder = '../lanyun-fs'  # folder with data files saved by create_input_files.py
    data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files

    # Load the word map (token -> index) produced by create_input_files.py.
    word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
    with open(word_map_file, 'r') as j:
        word_map = json.load(j)

    # Embedding / encoder dimensionalities for the reward network.
    vocab_size = len(word_map)
    embed_size = 512
    cnn_embed_size = 512
    rnn_hidden_size = 512

    # F is used by the loss functions defined below. (The redundant
    # `import torch` that used to sit here was removed — torch is already
    # imported at the top of the file.)
    import torch.nn.functional as F

    def VisualSemanticEmbeddingLoss(visuals, semantics):
        """Bidirectional max-margin visual-semantic embedding loss.

        For each direction (image->sentence and sentence->image), penalizes any
        non-matching pair whose similarity exceeds the matching (diagonal)
        pair's similarity minus a small margin.

        Args:
            visuals:   (N, D) batch of image embeddings.
            semantics: (N, D) batch of sentence embeddings; row i matches visuals[i].

        Returns:
            Scalar tensor: visual-branch loss + semantic-branch loss.
        """
        gamma = 0.2  # margin scale, spread over the batch
        N, _ = visuals.shape

        # Build the margin matrix on the inputs' device instead of relying on
        # the enclosing script's global `device` — fixes a device mismatch when
        # the function is called with tensors on a different device.
        margin = (gamma / N) * (torch.ones((N, N), device=visuals.device)
                                - torch.eye(N, device=visuals.device))

        def _branch_loss(anchor, other):
            # Hinge on: sim(anchor_i, other_j) - sim(anchor_i, other_i) + margin.
            dot = torch.mm(anchor, other.t())
            diag = torch.diag(dot).unsqueeze(1)
            return torch.sum(F.relu(dot - diag + margin)) / N

        # Both directions share the same structure; only the roles swap.
        return _branch_loss(visuals, semantics) + _branch_loss(semantics, visuals)

    def BidirectionalRankingLoss(image_emb, text_emb, beta=0.3, temperature=0.07):
        """Hinge ranking loss plus a symmetric InfoNCE contrastive term.

        Args:
            image_emb: (N, D) image embeddings; row i matches text_emb[i].
            text_emb:  (N, D) text embeddings.
            beta: ranking margin.
            temperature: softmax temperature for the InfoNCE term.

        Returns:
            Scalar tensor: mean hinge ranking loss + 0.5 * InfoNCE loss.
        """
        # L2-normalize embeddings for numerical stability.
        image_emb = nn.functional.normalize(image_emb, p=2, dim=1)
        text_emb = nn.functional.normalize(text_emb, p=2, dim=1)

        # Temperature-scaled cosine similarity matrix.
        sim_matrix = image_emb @ text_emb.t() / temperature

        # Positive pairs live on the diagonal.
        pos = torch.diag(sim_matrix)

        # Image-branch hinge loss; zero out the positive pairs themselves.
        image_loss = torch.clamp(beta - pos.unsqueeze(1) + sim_matrix, min=0)
        image_loss.fill_diagonal_(0)

        # Text-branch hinge loss.
        text_loss = torch.clamp(beta - pos.unsqueeze(0) + sim_matrix, min=0)
        text_loss.fill_diagonal_(0)

        # InfoNCE component. Create labels on the same device as the inputs
        # instead of the script-global `device` — fixes a device mismatch when
        # called with tensors on another device.
        labels = torch.arange(len(pos), device=sim_matrix.device)
        contrastive_loss = (
            nn.functional.cross_entropy(sim_matrix, labels) +
            nn.functional.cross_entropy(sim_matrix.t(), labels)
        ) / 2

        ranking_loss = (image_loss.sum() + text_loss.sum()) / (2 * len(pos))

        # Combined objective.
        return ranking_loss + 0.5 * contrastive_loss

    # ----- Model instantiation and optimizer ---------------------------------

    rnn_dim = 512
    reward_model = RewardNetwork(vocab_size, embed_size, cnn_embed_size, rnn_hidden_size).to(device)
    

    # Lower learning rate plus stronger weight decay for training stability.
    initial_lr = 5e-5  # further reduced initial learning rate
    optimizer = optim.Adam(
        [
            # CNN-encoder parameters are fine-tuned with a 10x smaller LR.
            {'params': [p for n, p in reward_model.named_parameters() if 'encoder_cnn' in n], 'lr': initial_lr * 0.1},
            {'params': [p for n, p in reward_model.named_parameters() if 'encoder_cnn' not in n]}
        ],
        lr=initial_lr, 
        weight_decay=1e-3  # increased weight decay (regularization)
    )

    # Cosine annealing: a gentler LR decay than step schedules.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=20, eta_min=1e-6
    )

    # Gradient scaler for mixed-precision (AMP) training.
    scaler = GradScaler()
    # ----- Data loading ------------------------------------------------------
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    
    
    # NOTE(review): no ToTensor() here — presumably CaptionDataset_tra already
    # yields float image tensors; confirm against the dataset implementation.
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        normalize
    ])

    train_loader = torch.utils.data.DataLoader(
        CaptionDataset_tra(data_folder, data_name, 'TRAIN', transform=train_transform),
        batch_size=batch_size_train, shuffle=True, num_workers=workers, pin_memory=True)
    
    # Validation set: normalization only, no augmentation.
    val_dataset = CaptionDataset_tra(data_folder, data_name, 'VAL', transform=transforms.Compose([normalize]))
    
    # Subset factor is 1, i.e. the FULL validation set is currently used;
    # lower the multiplier (e.g. 0.01) to validate on a random fraction.
    subset_size = int(len(val_dataset) * 1)
    indices = torch.randperm(len(val_dataset))[:subset_size]
    val_subset = torch.utils.data.Subset(val_dataset, indices)
    
    val_loader = torch.utils.data.DataLoader(
        val_subset,
        batch_size=batch_size_val, shuffle=False, num_workers=workers, pin_memory=True)

    best_loss = float('inf')  # Initialize best_loss to infinity
    model_save_path = '../lanyun-tmp/models/reward'  # Define the directory to save the models
    os.makedirs(model_save_path, exist_ok=True)  # Create the directory if it doesn't exist
    best_models = []  # List to track saved model filenames

    num_epochs = 100
    no_improvement = 0  # epochs since the best reward last improved (early stopping)
    # Helper used to spot-check reward values on a handful of batches.
    def evaluate_rewards(model, data_loader, device, num_samples=10):
        """Return reward scores for the first `num_samples` batches of a loader.

        Puts `model` in eval mode, scores each batch with GetRewards without
        tracking gradients, and flattens every score into one Python list.
        """
        model.eval()
        collected = []

        with torch.no_grad():
            for batch_idx, batch in enumerate(data_loader):
                # Only the requested number of leading batches is scored.
                if batch_idx >= num_samples:
                    break

                images, captions, caplens, _ = batch
                batch_scores = GetRewards(
                    images.to(device),
                    captions.to(device),
                    caplens.to(device),
                    model,
                )
                collected.extend(batch_scores.cpu().numpy().flatten().tolist())

        return collected
    
    # Loss that folds the reward signal directly into the objective.
    def RewardAwareLoss(image_features, sentence_features, alpha=0.5):
        """Contrastive (InfoNCE) loss combined with direct reward maximization.

        Args:
            image_features:    (N, D) image embeddings.
            sentence_features: (N, D) sentence embeddings; row i matches image i.
            alpha: balance between the two terms (0 = pure contrastive,
                   1 = pure reward maximization).

        Returns:
            (combined_loss, mean_reward): scalar loss tensor, and the batch's
            mean positive-pair cosine similarity as a Python float (monitoring).
        """
        # Cosine similarities via L2-normalized features.
        image_features_norm = nn.functional.normalize(image_features, p=2, dim=1)
        sentence_features_norm = nn.functional.normalize(sentence_features, p=2, dim=1)

        # Similarity matrix between all image/sentence pairs.
        sim_matrix = torch.mm(image_features_norm, sentence_features_norm.t())

        # Symmetric InfoNCE contrastive term.
        temperature = 0.07
        sim_matrix_scaled = sim_matrix / temperature
        # Labels on the inputs' device instead of the script-global `device` —
        # fixes a device mismatch when called with tensors on another device.
        labels = torch.arange(image_features.size(0), device=image_features.device)
        contrastive_loss = (
            nn.functional.cross_entropy(sim_matrix_scaled, labels) +
            nn.functional.cross_entropy(sim_matrix_scaled.t(), labels)
        ) / 2

        # Direct reward term: maximize the diagonal (positive-pair similarity),
        # i.e. minimize its negative mean.
        rewards = torch.diag(sim_matrix)
        reward_loss = -torch.mean(rewards)

        # Weighted combination of the two terms.
        combined_loss = (1 - alpha) * contrastive_loss + alpha * reward_loss

        # Return the total loss and the batch mean reward for monitoring.
        return combined_loss, rewards.mean().item()

    # ----- Training loop -----------------------------------------------------
    for epoch in range(num_epochs):
        # --- Training phase ---
        reward_model.train()
        train_loss = 0.0
        train_rewards = 0.0
        
        with tqdm(total=len(train_loader), desc=f"Epoch {epoch+1}/{num_epochs}") as pbar:
            # NOTE(review): the TRAIN loader yields 3-tuples here while the VAL
            # loader below yields 4-tuples — presumably CaptionDataset_tra
            # returns an extra element for non-TRAIN splits; confirm against the
            # dataset implementation.
            for i, (images, captions, caplens) in enumerate(train_loader):
                images = images.to(device)
                captions = captions.to(device)
                caplens = caplens.to(device)
                optimizer.zero_grad()
                
                # Mixed-precision forward pass.
                with autocast():
                    image_features, sentence_features = reward_model(images, captions, caplens)
                    loss, batch_reward = RewardAwareLoss(image_features, sentence_features, alpha=0.6)
                
                # Backward pass through the gradient scaler.
                scaler.scale(loss).backward()
                
                # Unscale before clipping so the threshold applies to the true
                # gradients; clipping guards against gradient explosion.
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(reward_model.parameters(), max_norm=1.0)
                
                scaler.step(optimizer)
                scaler.update()
                
                train_loss += loss.item()
                train_rewards += batch_reward
                pbar.set_postfix(Loss=loss.item(), Reward=batch_reward)
                pbar.update(1)
        
        # Per-epoch averages over training batches.
        avg_train_loss = train_loss / len(train_loader)
        avg_train_reward = train_rewards / len(train_loader)
        
        # --- Validation phase ---
        reward_model.eval()
        val_loss = 0.0
        val_rewards = 0.0
        all_gt_rewards = []
        
        with torch.no_grad():
            for images, captions, caplens, _ in val_loader:
                images = images.to(device)
                captions = captions.to(device)
                caplens = caplens.to(device)
                
                image_features, sentence_features = reward_model(images, captions, caplens)
                loss, batch_reward = RewardAwareLoss(image_features, sentence_features, alpha=0.6)
                val_loss += loss.item()
                val_rewards += batch_reward
                
                # Standard reward scores, used below for checkpoint selection.
                rewards = GetRewards(images, captions, caplens, reward_model)
                all_gt_rewards.extend(rewards.cpu().numpy().flatten().tolist())
        
        avg_val_loss = val_loss / len(val_loader)
        avg_val_reward = val_rewards / len(val_loader)
        avg_reward = sum(all_gt_rewards) / len(all_gt_rewards)
        min_reward = min(all_gt_rewards)
        max_reward = max(all_gt_rewards)
        
        print(f"Epoch {epoch+1}: Train Loss = {avg_train_loss:.4f}, Train Reward = {avg_train_reward:.4f}")
        print(f"Val Loss = {avg_val_loss:.4f}, Val Reward = {avg_val_reward:.4f}")
        print(f"GT Rewards: Avg = {avg_reward:.4f}, Min = {min_reward:.4f}, Max = {max_reward:.4f}")
        
        # Step the LR scheduler once per epoch.
        scheduler.step()
        
        # Checkpointing keyed directly on the average GT reward.
        # NOTE(review): torch.save on the whole module pickles the model class;
        # saving reward_model.state_dict() instead would be more portable.
        if epoch > 5 and avg_reward > 0.2:
            model_filename = f"reward_e{epoch+1}_loss{avg_val_loss:.4f}_reward{avg_reward:.4f}.pth"
            torch.save(reward_model, os.path.join(model_save_path, model_filename))
            
            # Add new model to tracking list
            best_models.append((model_filename, avg_reward))
            
            # Keep only the 5 best checkpoints, ranked by reward.
            best_models = sorted(best_models, key=lambda x: x[1], reverse=True)[:5]
            
            # Delete checkpoints that dropped out of the top-5 list.
            current_models = [m[0] for m in best_models]
            for file in os.listdir(model_save_path):
                if file.endswith('.pth') and file not in current_models:
                    try:
                        os.remove(os.path.join(model_save_path, file))
                    except:
                        # NOTE(review): best-effort cleanup, but the bare except
                        # also hides unexpected errors (e.g. permissions) —
                        # consider narrowing to OSError.
                        pass
            
            print(f"Saved {model_filename} with reward {avg_reward:.4f} (Keeping top 5 models by reward)")
        
        # Early stopping, driven by the reward metric rather than the loss.
        patience = 10  # epochs of tolerated non-improvement
        if epoch > 15:
            # Count an epoch as "no improvement" when the current reward falls
            # below the best saved checkpoint's reward by more than 0.001.
            if len(best_models) > 0 and avg_reward < best_models[0][1] - 0.001:
                no_improvement += 1
                if no_improvement >= patience:
                    print(f"Early stopping at epoch {epoch+1} - No improvement in rewards for {patience} epochs")
                    break
            else:
                no_improvement = 0
