import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from prepare_data import *
import argparse
import torch
import torch.nn as nn
from PIL import Image
from scipy.stats import pearsonr
from model import AesModel
from torch.utils.data import DataLoader
from Actor_Critic_model import *


# Helper: load the aesthetic-score data arrays.
def load_aesthetic_data(photo_size):
    """Load aesthetic-score arrays for training and testing.

    Fetches the raw RL data via ``get_RL_data`` and keeps only element
    index 2 of each sample (assumed to be the aesthetic score vector),
    cast to ``float32`` numpy arrays.
    """
    raw_train, raw_test = get_RL_data(photo_size)

    # Pull the score entry (index 2) out of every sample and cast once.
    train_set = np.array([sample[2] for sample in raw_train], dtype=np.float32)
    test_set = np.array([sample[2] for sample in raw_test], dtype=np.float32)

    return train_set, test_set


# Helper: average number of aesthetic dimensions annotated per student.
def calculate_avg_annotations_per_student(train_set):
    """Return the mean number of annotated (non-zero) dimensions per student.

    ``train_set`` is a 2-D array with one row per student and one column per
    aesthetic dimension; a zero entry means "not annotated".
    """
    # Count non-zero entries row-wise, then average over students.
    nonzero_per_row = (train_set != 0).sum(axis=1)
    return np.mean(nonzero_per_row)



# Helper: reward for improvement of the aesthetic-ability score (MSE-based).
def calculate_accuracy_improvement_reward(previous_mse, current_mse):
    """Reward the accuracy trend between two MSE measurements.

    Returns 1.0 whenever the MSE did not increase, and decays toward 0 as
    the MSE regression (current minus previous) grows.
    """
    regression = current_mse - previous_mse
    if regression < 0:
        # Improvement (or no change): clamp the regression at zero.
        regression = 0
    return 1 / (1 + regression)



# Helper: correlation-based reward for the selected dimensions.
def calculate_relevance_reward(selected_dimensions):
    """Reward based on the mean pairwise Pearson correlation of the rows.

    Returns 0 when fewer than two dimensions are selected (a correlation
    needs at least one pair); otherwise maps the average pairwise
    correlation ``r`` in [-1, 1] to a reward ``1 / (2 - r)`` in [1/3, 1].
    """
    if len(selected_dimensions) < 2:
        return 0  # not enough dimensions to form a pair

    # Pearson correlation for every unordered pair of rows.
    pair_corrs = []
    for idx, first in enumerate(selected_dimensions):
        for second in selected_dimensions[idx + 1:]:
            pair_corrs.append(pearsonr(first, second)[0])

    avg_correlation = sum(pair_corrs) / len(pair_corrs) if pair_corrs else 0
    return 1 / (1 + (1 - avg_correlation))



# Helper: penalty for the number of selected dimensions.
def calculate_penalty(num_selected_dimensions, penalty_factor):
    """Linear selection cost: each selected dimension adds ``penalty_factor``."""
    return penalty_factor * num_selected_dimensions


def calculate_total_reward(previous_mse, current_mse, selected_dimensions, num_selected_dimensions, penalty_factor,
                            w1=1.0, w2=1.0, w3=0.5):
    """Combine accuracy, relevance and cost into one scalar reward.

    The total is ``w1 * accuracy + w2 * relevance - w3 * penalty``, where the
    three components come from the dedicated helper functions above.
    """
    print(f"previous_mse: {previous_mse}, current_mse: {current_mse}, selected_dimensions: {selected_dimensions}")

    # Individual reward components.
    accuracy_reward = calculate_accuracy_improvement_reward(previous_mse, current_mse)
    relevance_reward = calculate_relevance_reward(selected_dimensions)
    penalty = calculate_penalty(num_selected_dimensions, penalty_factor)

    # Debug trace of each component before weighting.
    print(f"accuracy_reward: {accuracy_reward}, relevance_reward: {relevance_reward}, penalty: {penalty}")

    combined = w1 * accuracy_reward + w2 * relevance_reward - w3 * penalty
    print(f"total_reward: {combined}")
    return combined



import numpy as np
from scipy.stats import pearsonr


import numpy as np

class MultiArmedBanditGFS:
    """Multi-armed-bandit feature selection over aesthetic dimensions.

    Each aesthetic dimension is one "arm". Arms are scored by sampling from
    per-arm Beta distributions (Thompson-style) combined with an
    epsilon-greedy exploration step, and a scalar reward from
    ``reward_function`` drives the Beta-parameter updates.
    """

    def __init__(self, num_arms, exploration_ratio, reward_function, initial_values, penalty_factor):
        # num_arms: number of aesthetic dimensions (one bandit arm each).
        # exploration_ratio: epsilon for the epsilon-greedy policy.
        # reward_function: callable(previous_mse, current_mse, selected_dims,
        #   num_selected, penalty_factor) -> scalar reward (see
        #   calculate_total_reward for the expected signature).
        # initial_values: per-arm prior offsets added to the Beta alphas;
        #   must have length num_arms.
        # penalty_factor: per-dimension cost forwarded to the reward function.
        self.num_arms = num_arms
        self.epsilon = exploration_ratio  # exploration ratio for epsilon-greedy
        self.reward_function = reward_function
        self.penalty_factor = penalty_factor

        # Initial alpha and beta parameters of the per-arm Beta distributions.
        self.alpha = np.full(num_arms, 1.0 + np.array(initial_values))  # seed alphas with the prior values
        self.beta = np.full(num_arms, 2.0)  # higher beta -> stronger exploration
        # Best (lowest) MSE seen so far; starts at +inf so the first
        # iteration always looks like an improvement.
        self.last_mse = np.inf
        self.selected_at_t = np.zeros(num_arms, dtype=bool)  # dimensions selected at time t
        self.selected_at_t1 = np.zeros(num_arms, dtype=bool)  # dimensions selected at time t+1
        self.reward_history = np.zeros(num_arms)  # last reward recorded per dimension

    def generative_oracle(self):
        """Select dimensions via epsilon-greedy over Beta-distribution samples.

        NOTE(review): both branches return ALL num_arms arms — the explore
        branch draws a full permutation (choice of num_arms out of num_arms
        without replacement) and the exploit branch keeps the full argsort
        slice [-num_arms:]. If a top-k subset was intended, the second
        argument of np.random.choice and the slice length should be k.
        """
        if np.random.uniform(0, 1) < self.epsilon:
            # Explore: random ordering of the dimensions.
            return np.random.choice(self.num_arms, self.num_arms, replace=False)
        else:
            # Exploit: sample from each arm's Beta distribution and rank
            # the arms by the sampled value (ascending; slice keeps all).
            sampled_values = np.random.beta(self.alpha, self.beta)
            return np.argsort(sampled_values)[-self.num_arms:]

    def update_beta_parameters(self, selected_arms, reward_t, reward_t1):
        """Update Beta parameters from the current vs. previous rewards.

        NOTE(review): callers pass ``self.reward_history[selected_arms]`` as
        ``reward_t1``, i.e. an array re-indexed by position, while ``arm``
        here is an absolute arm id — this only avoids an IndexError because
        generative_oracle returns every arm, and the values are permuted.
        NOTE(review): a higher current reward increases beta (lowering the
        arm's expected value), which looks inverted — confirm intent.
        """
        for arm in selected_arms:
            if reward_t[arm] > reward_t1[arm]:
                self.beta[arm] += 1  # reward rose: bump beta (per original comment, to keep exploring)
            else:
                self.alpha[arm] += 1  # otherwise bump alpha, exploiting the arm

    def update_selected_dims(self, selected_arms, reward_t, reward_t1):
        """Keep or drop each selected dimension based on its reward trend.

        NOTE(review): run() refreshes reward_history right before calling
        this, so reward_t and reward_t1 hold the same values (up to the
        selected_arms permutation) and the "drop" branch rarely fires.
        """
        for arm in selected_arms:
            print(f"维度 {arm} 当前奖励: {reward_t[arm]}, 上次奖励: {reward_t1[arm]}")
            if reward_t[arm] < reward_t1[arm]:
                print(f"取消选择维度 {arm}")
                self.selected_at_t1[arm] = False
            else:
                self.selected_at_t1[arm] = True

    def allocate_rewards(self, selected_at_t, selected_at_t1, reward_t1):
        """Distribute the scalar reward to arms based on selection history.

        NOTE(review): all three branches assign the same reward_t1, so the
        only distinction made is selected-at-either-time vs. never-selected
        (which keeps 0). This method is not called anywhere in this file.
        """
        allocated_rewards = np.zeros(self.num_arms)  # one reward slot per arm

        for arm in range(self.num_arms):
            if selected_at_t[arm] and selected_at_t1[arm]:
                allocated_rewards[arm] = reward_t1  # selected at both t and t+1
            elif not selected_at_t[arm] and selected_at_t1[arm]:
                allocated_rewards[arm] = reward_t1  # newly selected
            elif selected_at_t[arm] and not selected_at_t1[arm]:
                allocated_rewards[arm] = reward_t1  # just deselected
            # never-selected arms keep the initial 0

        return allocated_rewards

    def run(self, iterations, train_dim, test_dim):
        """Run the bandit for ``iterations`` rounds.

        train_dim / test_dim: 2-D arrays of matching shape
        (samples x dimensions) — TODO confirm; the per-dimension MSE below
        averages over axis 0.

        Returns (indices of finally-selected dimensions, mean of all
        per-dimension MSE values accumulated over the run).
        """
        dim_mse_values = []
        for i in range(iterations):
            selected_arms = self.generative_oracle()

            # Per-dimension MSE between train and test score matrices.
            # NOTE(review): this is constant across iterations — nothing in
            # the loop changes train_dim or test_dim.
            current_dim_mse = np.mean((train_dim - test_dim) ** 2, axis=0)
            dim_mse_values.append(current_dim_mse)

            # Mean MSE restricted to the currently selected dimensions.
            current_mse = np.mean(current_dim_mse[selected_arms])

            print(f"选择的维度: {selected_arms}, 当前MSE: {current_mse}")

            # Scalar reward for this round.
            # NOTE(review): train_dim[selected_arms] indexes ROWS (samples),
            # not dimensions; train_dim[:, selected_arms].T may have been
            # intended — confirm against the reward function's expectation.
            reward_t = self.reward_function(self.last_mse, current_mse, train_dim[selected_arms], len(selected_arms),
                                            self.penalty_factor)

            print(f"当前奖励: {reward_t}")

            # Broadcast the scalar reward onto each selected arm's slot.
            reward_t_per_dim = np.zeros(self.num_arms)
            for arm in selected_arms:
                reward_t_per_dim[arm] = reward_t

            # See the NOTE(review) on update_beta_parameters about the
            # re-indexed reward_history argument.
            self.update_beta_parameters(selected_arms, reward_t_per_dim, self.reward_history[selected_arms])
            self.last_mse = current_mse

            # Record this round's rewards BEFORE the selection update below,
            # which therefore compares the new rewards against themselves
            # (see NOTE(review) on update_selected_dims).
            self.reward_history[selected_arms] = reward_t_per_dim[selected_arms]

            # Re-evaluate which dimensions stay selected.
            self.update_selected_dims(selected_arms, reward_t_per_dim, self.reward_history[selected_arms])

            # Shift t+1 selections into the t slot for the next round.
            self.selected_at_t = np.copy(self.selected_at_t1)

        # Indices of the dimensions still selected after the final round.
        final_selected_dims = np.where(self.selected_at_t1 == True)[0]
        return final_selected_dims, np.mean(dim_mse_values)



# # 主流程
# def main():
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     train_set, test_set = load_aesthetic_data(opt.photo_size)
#     train_set = train_set[:861, :]  # 截取训练集的前861个样本
#
#     train_setnew, test_setnew = get_RL_data(opt.photo_size)
#     train_data = DataLoader(dataset=train_setnew, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True,
#                             num_workers=0, drop_last=True)
#     test_data = DataLoader(dataset=test_setnew, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True,
#                            num_workers=0, drop_last=True)
#
#     num_arms = train_set.shape[1]  # 每个美学维度作为一个“臂”
#     exploration_ratio = 0.1  # 探索概率
#
#     all_aes_ability = []
#     aes_model = AesModel(embedding_dim=opt.embedding_dim, opt=opt).cuda()
#
#     aes_model.load_state_dict(torch.load('./aes_model_weight.pth'))
#     num_visits, tl_visits = 0, int(train_data.batch_size) * len(train_data) * opt.path_len
#     train_agents = {}
#
#     # 在循环外初始化存储所有图片美学能力的列表
#     all_aes_ability = []
#
#     while num_visits < tl_visits:
#         for img, name, labels in train_data:
#             for i in range(len(img)):
#                 # 如果图片名称未在 train_agents 中，进行处理
#                 if name[i] not in train_agents:
#                     train_agents[name[i]] = {}
#
#                     # 获取单张图片，推理美学能力
#                     one_picture = img[i].unsqueeze(0).cuda()
#                     aes_ability_tensor, _ = aes_model(one_picture)  # aes_model 输出美学能力
#
#                     # 进行 Min-Max 归一化到 [0.2, 0.8]
#                     min_val = aes_ability_tensor.min()
#                     max_val = aes_ability_tensor.max()
#                     aes_ability_tensor = (aes_ability_tensor - min_val) * (0.8 - 0.2) / (max_val - min_val) + 0.2
#
#                     # 将当前图片的结果转换为 numpy 并存储到 all_aes_ability 中
#                     all_aes_ability.append(aes_ability_tensor.cpu().detach().numpy())
#
#     # 最终将所有图片的美学能力列表转换为 numpy 数组
#     all_aes_ability = np.array(all_aes_ability)
#
#     initial_values = np.mean(all_aes_ability, axis=0)
#     penalty_factor = 0.1
#     mab = MultiArmedBanditGFS(
#         num_arms=num_arms,
#         exploration_ratio=exploration_ratio,
#         reward_function=calculate_total_reward,
#         initial_values=initial_values,
#         penalty_factor=penalty_factor
#     )
#
#     iterations = 100
#     final_selected_dims, avg_mse = mab.run(iterations=iterations, train_dim=train_set, test_dim=test_set)
#     print(f"最终模型的平均MSE值: {avg_mse:.4f}")
#
#
#     # 计算每个学生人工标注维度的平均数量
#     avg_annotations_per_student = calculate_avg_annotations_per_student(train_set)
#     print(f"每个学生人工标注的维度数量的平均值: {avg_annotations_per_student:.2f}")
#
#     # 根据MSE动态选择需要人工标注的维度
#     mse_values = np.random.uniform(0, 0.1, size=train_set.shape[1])  # 模拟每个维度的MSE
#     selected_dimensions = dynamic_annotation_selection(mse_values, threshold=0.01)
#     print(f"选中的维度: {selected_dimensions}")


def main():
    """Run a small-scale multi-armed-bandit feature-selection experiment.

    Loads a 100-sample subset of the aesthetic data, seeds the bandit's
    Beta priors from simulated per-image aesthetic abilities, runs the
    bandit for a short number of iterations, and prints the selected
    dimensions and the average per-dimension MSE.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load a small batch of data (first 100 samples for train and test).
    train_set, test_set = load_aesthetic_data(opt.photo_size)
    train_set = train_set[:100, :]
    test_set = test_set[:100, :]

    # DataLoaders are built for parity with the full pipeline; they are not
    # consumed below.
    train_setnew, test_setnew = get_RL_data(opt.photo_size)
    train_data = DataLoader(dataset=train_setnew, batch_size=5, collate_fn=collate_RL, pin_memory=True,
                            num_workers=0, drop_last=True)
    test_data = DataLoader(dataset=test_setnew, batch_size=5, collate_fn=collate_RL, pin_memory=True,
                           num_workers=0, drop_last=True)

    num_arms = train_set.shape[1]  # each aesthetic dimension is one bandit "arm"
    exploration_ratio = 0.1  # epsilon-greedy exploration probability
    penalty_factor = 0.1  # per-dimension annotation-cost penalty
    iterations = 10  # shortened iteration count for this small-scale run

    # The aesthetic model is loaded for pipeline parity; its outputs are not
    # used below (abilities are simulated instead).
    aes_model = AesModel(embedding_dim=opt.embedding_dim, opt=opt).to(device)
    aes_model.load_state_dict(torch.load('./aes_model_weight.pth'))

    # Simulated per-image aesthetic abilities in [0.2, 0.8]; their
    # per-dimension means seed the bandit's Beta priors.
    all_aes_ability = np.random.uniform(0.2, 0.8, size=(100, num_arms))
    initial_values = np.mean(all_aes_ability, axis=0)

    # Initialize the multi-armed bandit.
    mab = MultiArmedBanditGFS(
        num_arms=num_arms,
        exploration_ratio=exploration_ratio,
        reward_function=calculate_total_reward,
        initial_values=initial_values,
        penalty_factor=penalty_factor,
    )

    # Run the bandit. Pass the configured `iterations` variable instead of a
    # hard-coded literal so the setting above cannot silently drift.
    final_selected_dims, avg_dim_mse = mab.run(iterations=iterations, train_dim=train_set, test_dim=test_set)
    print(f"选中的维度: {final_selected_dims}")
    print(f"最终模型的平均MSE值: {avg_dim_mse:.4f}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSize', type=int, default=10, help='input batch size')  # 原本是10000 现改5000
    parser.add_argument('--batch_size', type=int, default=20, help='input batch size')  # 原本是10000 现改5000
    parser.add_argument('--hidden_size', type=int, default=200, help='hidden state size')  # 原本是200 现改500
    parser.add_argument('--hidden_dim', type=int, default=100, help='hidden state size')  # 原本是200 现改500
    parser.add_argument('--epoch', type=int, default=1, help='number of epochs to train for')  # 这里为了方便调试代码，我改成1了
    parser.add_argument('--lr', type=float, default=0.1, help='learning rate')
    # parser.add_argument('--l2', type=float, default=0.0001, help='l2 penalty')
    parser.add_argument('--num_layers', type=int, default=6, help='layers nums')
    parser.add_argument('--num_heads', type=int, default=6, help='attention heads nums')
    parser.add_argument('--mlp_ratio', type=float, default=1, help='the ratio of hidden layers in the middle')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='the first layer convolution kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='the second layer convolution kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--Stride2', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--num_classes', type=int, default=77, help='the number of categories')
    parser.add_argument('--num_classes_last', type=int, default=7, help='the number of categories to the last label')
    parser.add_argument('--photo_size', type=int, default=128, help='the number of categories to the last label')
    parser.add_argument('--Linear_nums', type=int, default=3, help='the number of categories to the last label')
    parser.add_argument('--pb_path', type=str, default='./data', help='the number of categories to the last label')
    parser.add_argument('--state_nums', type=int, default=77, help='the number of categories to the last label')
    parser.add_argument('--gamma', type=float, default=0.99, help='the number of categories to the last label')
    parser.add_argument('--epsilon', type=float, default=0.8, help='the number of categories to the last label')
    parser.add_argument('--target_update_nums', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--least_score', type=float, default=0.8, help='the number of categories to the last label')
    parser.add_argument('--ReplayBuffer_capacity', type=int, default=100,
                        help='the number of categories to the last label')
    parser.add_argument('--min_size', type=int, default=50, help='the number of categories to the last label')
    parser.add_argument('--path_len', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--D', type=float, default=1.7, help='the number of categories to the last label')
    parser.add_argument('--a', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--model_name', type=str, default=str('Aes'), help='the number of categories to the last label')
    parser.add_argument('--mu', type=float, default=1, help='the number of categories to the last label')
    parser.add_argument('--embedding_dim', type=int, default=192, help='the number of categories to the last label')
    parser.add_argument('--msepara', type=int, default=100, help='the number of categories to the last label')
    parser.add_argument('--expand_name', type=str, default=str('embedding_dim'),
                        help='the number of categories to the last label')
    parser.add_argument('--is_adjust_parameter', type=str, default=str('true'),
                        help='the number of categories to the last label')
    parser.add_argument("--mse_threshold", type=float, default=0.01, help="MSE阈值，用于调整top_k")
    opt = parser.parse_args()
    main()
