import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from prepare_data import *
import argparse
import torch
import torch.nn as nn
from PIL import Image
from scipy.stats import pearsonr
from model import AesModel
from torch.utils.data import DataLoader
from torch.utils.data import Subset
from multiarmutils import *



class MultiArmedBanditGFS:
    def __init__(self, arms, exploration_ratio, reward_function, initial_values, penalty_factor, aes_model):
        """
        arms 是一个包含审美维度（如情感反应、情绪状态等）的列表
        arms = ["审美愉悦", "好奇心", "快乐"] 等等
        """
        self.arm_names = arms  # 存储审美维度的名字
        self.arm_map = {arm: idx for idx, arm in enumerate(arms)}  # 创建字符串到整数的映射
        self.epsilon = exploration_ratio  # 探索比例（epsilon-greedy）
        self.reward_function = reward_function  # 奖励函数
        self.penalty_factor = penalty_factor  # 惩罚因子
        self.aes_model = aes_model  # 传入美学模型

        # 初始化 Beta 分布的 α 和 β 参数
        self.alpha = np.array([1.0 + value for value in initial_values])  # α参数初始化为初始值
        self.beta = np.ones(len(initial_values))  # β参数初始化为1

        # 初始化 last_mse 为字典，用来存储每个臂的历史 MSE
        self.last_mse = {i: float('inf') for i in range(len(initial_values))}  # 每个维度的MSE初始化为无穷大

    def generative_oracle(self, available_arms=None):
        if available_arms is None:
            available_arms = []

        # 计算每个维度的成功概率 (alpha / (alpha + beta))
        arm_probabilities = self.alpha / (self.alpha + self.beta)  # 计算每个维度的选择概率
        sampled_probs = np.random.beta(self.alpha, self.beta)  # 使用 Beta 分布进行采样

        K = np.random.randint(1, len(available_arms) + 1)  # 动态选择标注的维度数量
        selected_arms = np.argsort(sampled_probs)[-K:]  # 选择概率最高的K个维度

        # 确保 selected_arms 是整数类型
        selected_arms = selected_arms.astype(int)


        return selected_arms, K

    def update_beta_parameters(self, selected_arms, reward_t, reward_t1):
        # 遍历 selected_arms 中的每个维度，逐个更新 β 参数
        for arm in selected_arms:
            if isinstance(arm, int):  # 确保 arm 是整数
                if reward_t > reward_t1:  # 如果奖励增加，更新 α
                    self.alpha[arm] += 1
                else:  # 如果奖励减少，更新 β
                    self.beta[arm] += 1

    def calculate_mse(self, predicted_ability, true_ability):
        return np.mean((predicted_ability - true_ability) ** 2)

    def run(self, iterations, test_loader):
        # 初始化记录
        dim_score, dim_label = {}, {}
        dim_mse_history = {}

        for i in range(iterations):
            num_visits = 0
            for img, name, labels in test_loader:
                # 遍历当前批次的样本
                for idx in range(len(img)):
                    one_picture = img[idx].unsqueeze(0).cuda()

                    # 如果当前图片所属维度未初始化，初始化相关信息
                    if name[idx] not in dim_score:
                        dim_score[name[idx]] = []
                        dim_label[name[idx]] = []
                        dim_mse_history[name[idx]] = []

                    # 使用模型预测美学能力
                    aes_ability, _ = self.aes_model(one_picture)
                    min_val, max_val = aes_ability.min(), aes_ability.max()

                    # Min-Max归一化到 [0.2, 0.8]
                    aes_ability = (aes_ability - min_val) * (0.8 - 0.2) / (max_val - min_val) + 0.2

                    # 添加分数和真实标签
                    dim_score[name[idx]].append(aes_ability.cpu().detach().numpy())
                    dim_label[name[idx]].append(labels[idx].numpy())

                    # 计算MSE并记录
                    mse = self.calculate_mse(aes_ability.cpu().detach().numpy(), labels[idx].numpy())
                    dim_mse_history[name[idx]].append(mse)

                num_visits += len(img)
                if num_visits >= len(test_loader) * test_loader.batch_size:
                    break  # 控制循环避免超过指定次数

            # 根据历史MSE更新奖励
            total_reward = 0
            for arm in dim_mse_history:
                if len(dim_mse_history[arm]) > 0:
                    avg_mse = np.mean(dim_mse_history[arm])
                    reward_t = self.reward_function(self.last_mse.get(arm, float('inf')), avg_mse, None, 1, self.penalty_factor)
                    total_reward += reward_t  # 汇总奖励
                    self.update_beta_parameters([arm], reward_t, self.last_mse.get(arm, float('inf')))
                    self.last_mse[arm] = avg_mse  # 更新维度的最后MSE

            # 在每轮中，动态选择需要人工标注的维度和数量
            available_arms = list(dim_mse_history.keys())
            selected_arms, K = self.generative_oracle(available_arms=available_arms)
            print(f"Round {i+1}: Selected {K} arms -> {selected_arms}")

        # 汇总所有维度的分数和标签
        all_scores = np.concatenate([np.array(dim_score[arm]).flatten() for arm in dim_score])
        all_labels = np.concatenate([np.array(dim_label[arm]).flatten() for arm in dim_label])

        # 使用评估函数计算最终指标
        mae, mse, rmse = evaluate(torch.tensor(all_scores), torch.tensor(all_labels))
        print(f'Final Evaluation -> MAE: {mae}, MSE: {mse}, RMSE: {rmse}')

        # 返回最终选择的维度
        return selected_arms