import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import argparse
from RL_model import RL_model
from prepare_data import get_RL_data, collate_RL
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
import random
import argparse
import torch.nn.functional as F
from RL_model import RL_model
from scipy.stats import pearsonr  # 添加相关性计算的库
import numpy as np
# 假设 get_data 函数可以加载数据集
from prepare_data import get_RL_data, collate_RL

def train_aesthetic_model(opt):
    """Train the aesthetic model with UCB-based selection of hard dimensions.

    Pipeline: load data -> build model -> supervised pre-training
    (``train_RL``) -> 20 epochs of bandit-driven fine-tuning, where a
    multi-armed bandit flags per-image dimensions for manual annotation.

    Args:
        opt: parsed ``argparse.Namespace`` with data/model/optimiser settings.
    """
    # 1. Load the dataset split into train / test portions.
    train_dataset, test_dataset = get_RL_data(opt.photo_size, opt.data_path)

    # Data loaders (collate_RL batches the (image, name, label) triples).
    train_loader = DataLoader(train_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)

    # Build the model; the embedding size is derived from the CNN geometry.
    embedding_dim = calculate_embedding_dim(opt)
    aes_model = RL_model(opt, embedding_dim).cuda()

    # Supervised pre-training of the backbone.
    train_RL(aes_model, train_loader, test_loader)

    # Create the optimizer ONCE so Adam's moment estimates persist across
    # iterations (it was previously re-built every step, resetting its state).
    optimizer = optim.Adam(aes_model.aes.parameters(), lr=opt.lr)

    for epoch in range(20):  # 20 fine-tuning epochs
        total_loss = 0
        total_difficult_dims = 0
        total_images = 0

        for trial in range(3):  # three sampling rounds per epoch
            sampled_images = random.sample(train_dataset, 100)  # draw 100 random images
            images, name, labels = zip(*sampled_images)  # unpack images / names / labels
            images = torch.stack(images).cuda()

            # Ensure the labels form a single tensor on the GPU.
            labels = torch.stack([torch.tensor(label).cuda() for label in labels])

            # Predictions, shape [batch, num_dims].
            y_hat = aes_model.aes(images)

            # y_double_hat stands in for the (partly human-corrected) targets.
            y_double_hat = y_hat.clone()
            difficult_dims_list = []  # per-image dimensions flagged for manual labelling

            # Bandit statistics; counts start at 1 to avoid division by zero.
            arm_rewards = [0] * y_hat.size(1)
            arm_counts = [1] * y_hat.size(1)

            t = epoch * 3 + trial  # global trial index for the UCB exploration term
            for i in range(y_double_hat.size(0)):  # one bandit pull per image
                difficult_dims = multi_arm_bandit(y_hat[i], labels[i], t, arm_rewards, arm_counts)
                difficult_dims_list.append(difficult_dims)

                # Feed the observed errors back into the bandit statistics.
                for dim in difficult_dims:
                    arm_counts[dim] += 1
                    arm_rewards[dim] += torch.abs(y_hat[i][dim] - labels[i][dim]).item()

            # Backbone loss on the (corrected) predictions.
            loss = calculate_loss(y_double_hat, labels)
            total_loss += loss.item()
            total_images += len(sampled_images)  # was hard-coded to 10 despite sampling 100

            # Update the CNN backbone.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Update the sub-models on the flagged dimensions.
            submodel_loss = update_submodels(aes_model, y_double_hat, labels, difficult_dims_list)
            total_loss += submodel_loss

            # Count how many dimensions needed manual annotation.
            total_difficult_dims += sum(len(d) for d in difficult_dims_list)

        # NOTE(review): metrics are computed on the LAST sampled batch only —
        # confirm whether epoch-wide metrics were intended.
        mae, mse, rmse, r2 = calculate_metrics(y_double_hat, labels)
        avg_difficult_dims = total_difficult_dims / total_images if total_images > 0 else 0
        print(f'Epoch [{epoch + 1}], MAE: {mae:.4f}, MSE: {mse:.4f}, RMSE: {rmse:.4f}, R²: {r2:.4f}, Avg. Difficult Dims: {avg_difficult_dims:.2f}')

def train_RL(model, train_loader, test_loader):
    """Supervised pre-training loop for the aesthetic backbone.

    Args:
        model: RL_model whose ``.aes`` sub-network is trained in place.
        train_loader: DataLoader yielding (img, name, labels) batches.
        test_loader: DataLoader used for the final evaluation.
    """
    model.aes.train()  # switch to training mode
    optimizer = optim.Adam(model.aes.parameters(), lr=model.opt.lr)

    for epoch in range(model.opt.model_epochs):
        total_loss = 0  # running loss for this epoch
        for img, name, labels in train_loader:
            img, labels = img.cuda(), labels.cuda()

            # Forward pass and loss.
            scores = model.aes(img)
            loss = model.aes_loss_func1(scores, labels)
            total_loss += loss.item()

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # Final evaluation on the held-out test set.
    metrics = model.test(test_loader)
    mae, mse, rmse, r2, pea, mape, mpe = [m.item() for m in metrics]

    print(f'预训练测试： MAE:{mae} MSE:{mse} RMSE:{rmse} R²:{r2} PEA:{pea} MAPE:{mape} MPE:{mpe}')

def calculate_embedding_dim(opt):
    """Compute the flattened feature size after two conv + 2x2-pool stages.

    Mirrors: conv(K1, S1) -> 2x2 pool -> conv(K2, S2, +2 term — presumably
    padding=1, TODO confirm against the model) -> 2x2 pool, then flattens
    the H*W grid over 3 channels.
    """
    size = int((opt.photo_size - opt.Kernel_size1) // opt.Stride1 + 1)  # conv 1
    size = int((size - 2) // 2 + 1)                                     # pool 1
    size = int((size - opt.Kernel_size2 + 2) // opt.Stride2 + 1)        # conv 2
    size = int((size - 2) // 2 + 1)                                     # pool 2
    return int(size * size * 3)

import math

def multi_arm_bandit(y_hat, labels, t, arm_rewards, arm_counts, threshold=0.1):
    """Select the dimensions that should be manually labelled, using UCB.

    Args:
        y_hat (Tensor): predicted scores for ONE image, shape [num_dims].
        labels (Tensor): ground-truth scores for the same image, shape [num_dims].
        t (int): current trial index; drives the exploration term.
        arm_rewards (list): running reward total per dimension; mutated in place.
        arm_counts (list): times each dimension was pulled; mutated in place.
        threshold (float): minimum |error| for a dimension to be selected.

    Returns:
        list: indices of the dimensions picked for manual annotation
        (at most 3, ordered by decreasing UCB value).
    """
    # Per-dimension absolute prediction error (detached, moved to CPU/NumPy).
    differences = torch.abs(y_hat.detach() - labels.detach()).cpu().numpy()

    # The exploration term needs t >= 1.
    t = max(t, 1)

    # Credit each arm with the inverse error (small error -> large reward).
    # NOTE(review): the caller in train_aesthetic_model also adds the raw
    # error to arm_rewards for the selected dims after this returns, so those
    # arms are updated twice per pull — confirm this is intended.
    for arm in range(len(differences)):
        reward = 1 / (differences[arm] + 1e-6)  # epsilon avoids division by zero
        arm_rewards[arm] += reward
        arm_counts[arm] += 1

    # UCB value = average reward + exploration bonus.
    # (The original initialised ucb_values twice; the dead first init is gone.)
    ucb_values = []
    for arm in range(len(arm_rewards)):
        if arm_counts[arm] == 0:
            ucb_values.append(float('inf'))  # never pulled: explore first
        else:
            average_reward = arm_rewards[arm] / arm_counts[arm]
            exploration = np.sqrt((3 * np.log(t + 1e-6)) / (2 * arm_counts[arm]))  # guard against log(0)
            ucb_values.append(average_reward + exploration)

    # Take the 3 highest-UCB dimensions...
    difficult_dims = sorted(range(len(ucb_values)), key=lambda i: ucb_values[i], reverse=True)[:3]

    # ...but only keep those whose error actually exceeds the threshold.
    selected_dims = [dim for dim in difficult_dims if differences[dim] > threshold]

    return selected_dims






def calculate_loss(y_double_hat, labels):
    """Mean-squared error between (corrected) predictions and ground truth."""
    return F.mse_loss(y_double_hat, labels)

def update_submodels(model, y_double_hat, labels, difficult_dims_list):
    """Accumulate the masked MSE loss over the manually-flagged dimensions.

    For each image, a binary mask zeroes every dimension NOT flagged for
    manual annotation before the MSE is taken, so only the flagged
    dimensions contribute a non-zero error (the mean still runs over all
    dimensions).

    Returns:
        float: the summed per-image masked losses.
    """
    submodel_loss = 0
    for i, difficult_dims in enumerate(difficult_dims_list):
        # Binary mask: 1 on the dimensions flagged for manual annotation.
        mask = torch.zeros_like(y_double_hat[i])
        mask[difficult_dims] = 1

        # Masked MSE restricted to the flagged dimensions.
        loss = calculate_loss(y_double_hat[i] * mask, labels[i] * mask)
        print(f"Submodel {i}, Loss before update: {loss.item()}")
        submodel_loss += loss.item()

        # NOTE(review): the actual sub-model update (e.g.
        # model.submodel.update(...)) is still missing here.

    print(f"Total submodel loss: {submodel_loss}")
    return submodel_loss

def calculate_metrics(y_double_hat, labels):
    """Return (MAE, MSE, RMSE, R²) of predictions vs labels as Python floats."""
    err = y_double_hat - labels
    mae = torch.mean(torch.abs(err)).item()
    mse = torch.mean(err ** 2)           # kept as a tensor for the sqrt below
    rmse = torch.sqrt(mse)
    ss_res = torch.sum(err ** 2)         # residual sum of squares
    ss_tot = torch.sum((labels - torch.mean(labels)) ** 2)  # total sum of squares
    r2 = 1 - (ss_res / ss_tot).item()
    return mae, mse.item(), rmse.item(), r2

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Register all hyper-parameters and runtime settings.
    # NOTE(review): many help strings below are copy-pasted from other flags
    # ('the number of categories to the last label') and do not describe
    # their option — they are runtime text, so they are left untouched here;
    # consider correcting them at the source.
    parser.add_argument('--batchSize', type=int, default=20, help='input batch size')
    parser.add_argument('--sample_size', type=int, default=20, help='input batch size')
    parser.add_argument('--hidden_size', type=int, default=200, help='hidden state size')
    parser.add_argument('--hidden_dim', type=int, default=100, help='hidden state size')
    parser.add_argument('--epochs', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--AL_epochs', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--query_num', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--model_epochs', type=int, default=20, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--num_layers', type=int, default=6, help='layers nums')
    parser.add_argument('--num_heads', type=int, default=6, help='attention heads nums')
    parser.add_argument('--mlp_ratio', type=float, default=1, help='the ratio of hidden layers in the middle')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='the first layer convolution kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='the second layer convolution kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--Stride2', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--num_classes', type=int, default=77, help='the number of categories')
    parser.add_argument('--photo_size', type=int, default=128, help='the number of categories to the last label')
    parser.add_argument('--Linear_nums', type=int, default=3, help='the number of categories to the last label')
    parser.add_argument('--data_path', type=str, default='./data', help='the number of categories to the last label')
    parser.add_argument('--agent_nums', type=int, default=77, help='the number of categories to the last label')
    parser.add_argument('--gamma', type=float, default=0.99, help='the number of categories to the last label')
    parser.add_argument('--epsilon', type=float, default=0.8, help=
                        'the number of categories to the last label')
    parser.add_argument('--target_update_nums', type=int, default=4, help='the number of categories to the last label')
    parser.add_argument('--ReplayBuffer_capacity', type=int, default=80, help='the number of categories to the last label')
    parser.add_argument('--min_size', type=int, default=60, help='the number of categories to the last label')
    parser.add_argument('--path_len', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--D', type=float, default=1.7, help='the number of categories to the last label')
    parser.add_argument('--a', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--model_name', type=str, default=str('Aes'), help='the number of categories to the last label')
    parser.add_argument('--mu', type=float, default=1, help='the number of categories to the last label')
    parser.add_argument('--embedding_dim', type=int, default=200, help='the number of categories to the last label')
    parser.add_argument('--msepara', type=int, default=1, help='the number of categories to the last label')
    parser.add_argument('--maepara', type=int, default=100, help='the number of categories to the last label')
    parser.add_argument('--expand_name', type=str, default=str('embedding_dim'), help='the number of categories to the last label')
    parser.add_argument('--is_adjust_parameter', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--CNN_USE_TRANSFORM', type=str, default=str('true'), help='the number of categories to the last label')
    parser.add_argument('--Submodel_USE_TRANSFORM', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Save_data', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Set_data', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Save_dimension_chosen_nums', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--state_type', type=str, default=str('0'), help='0表示完全,1表示state中只有7维统计, 2表示state中只有d维GAT表征, 3表示state中只有77维')
    parser.add_argument('--reward_type', type=str, default=str('0'), help='0表示完全,1表示reward中只有MSE的+负向的, 2表示reward中只有MAE的+负向的, 3表示reward中只有相关性的+负向的')
    parser.add_argument('--singleCapacity', type=int, default=200, help='the number of categories to the last label')
    parser.add_argument('--FocalLoss_alpha', type=float, default=0.75, help='the number of categories to the last label')
    parser.add_argument('--FocalLoss_gamma', type=float, default=3, help='the number of categories to the last label')
    parser.add_argument('--OUTPUT_TENSORS', type=str, default='True', help='the number of categories to the last label')

    opt = parser.parse_args()

    # Entry point: run the full aesthetic-model training pipeline.
    train_aesthetic_model(opt)