import numpy as np
import torch
import torch.nn.functional as F
from prepare_data import get_RL_data  # 从 prepare_data 中加载数据
import argparse
import os
import csv

class MultiArmBandit:
    """Multi-armed bandit over aesthetic dimensions (one arm per dimension)."""

    def __init__(self, num_arms, method="UCB"):
        """
        Initialize the bandit state.
        :param num_arms: int, number of aesthetic dimensions; each is one arm
        :param method: str, selection strategy, defaults to "UCB" (the only
            strategy currently supported)
        """
        self.num_arms = num_arms
        self.method = method
        self.counts = np.zeros(num_arms)  # pulls per arm
        self.values = np.zeros(num_arms)  # running mean reward per arm
        self.alpha = np.ones(num_arms)    # Beta-distribution alpha per arm
        self.beta = np.ones(num_arms)     # Beta-distribution beta per arm

    def select_arm(self):
        """
        Choose an arm according to the configured strategy.
        :return: int, index of the selected arm
        :raises ValueError: if the strategy is not "UCB"
        """
        if self.method != "UCB":
            raise ValueError(f"不支持的选择策略: {self.method}")
        pulls_so_far = np.sum(self.counts)
        # Warm-up phase: pull each arm once, in index order.
        if pulls_so_far < self.num_arms:
            return int(pulls_so_far)
        # UCB1 score: mean reward plus an exploration bonus; the epsilon
        # guards against division by zero on untouched counts.
        exploration = np.sqrt(2 * np.log(pulls_so_far) / (self.counts + 1e-5))
        return int(np.argmax(self.values + exploration))

    def update(self, arm, reward):
        """
        Record a reward observation for the chosen arm.
        :param arm: int, index of the pulled arm
        :param reward: float, observed reward
        """
        self.counts[arm] += 1
        n = self.counts[arm]
        # Incremental running-mean update of the arm's value estimate.
        self.values[arm] = ((n - 1) / n) * self.values[arm] + (1 / n) * reward

        # A positive reward counts as a "success" for the Beta posterior.
        if reward > 0:
            self.alpha[arm] += 1
        else:
            self.beta[arm] += 1

    def select_optimal_arms(self, top_k=10):
        """
        Return the top_k arms ranked by estimated mean reward, best first.
        :param top_k: int, number of arms to return
        :return: list, indices of the best arms
        """
        ranked_desc = np.argsort(self.values)[::-1]
        return list(ranked_desc[:top_k])


def load_aesthetic_data(photo_size):
    """
    Load the dataset via get_RL_data and keep only the aesthetic dimensions.

    :param photo_size: int, input image size, forwarded to get_RL_data
    :return: tuple of np.ndarray (train aesthetic dims, test aesthetic dims),
        both float32
    """
    train_raw, test_raw = get_RL_data(photo_size)

    def _dims(samples):
        # Index 2 of each sample holds its aesthetic-dimension vector.
        return np.array([sample[2] for sample in samples], dtype=np.float32)

    return _dims(train_raw), _dims(test_raw)


def calculate_acc(predictions, labels, threshold=0.1):
    """
    Compute accuracy (ACC): the fraction of predictions whose absolute
    error against the ground truth falls below the threshold.

    :param predictions: predicted values (numpy-compatible array)
    :param labels: ground-truth values, same shape as predictions
    :param threshold: float, a prediction is "correct" when
        |prediction - label| < threshold
    :return: float, fraction of correct predictions
    """
    absolute_errors = np.abs(predictions - labels)
    return (absolute_errors < threshold).mean()


def calculate_mae(predictions, labels):
    """
    Compute the mean absolute error (MAE) between predictions and labels.

    :param predictions: predicted values (numpy-compatible array)
    :param labels: ground-truth values, same shape as predictions
    :return: float, MAE
    """
    return np.abs(predictions - labels).mean()


def calculate_contextual_reward(arm_data, arm_labels, context_features):
    """
    Compute a context-weighted reward for one arm.

    The base reward is the inverse of the arm's mean absolute error; it is
    scaled by a contextual factor derived from the context features.

    :param arm_data: predictions for the current arm
    :param arm_labels: ground-truth labels for the current arm
    :param context_features: context features (e.g. image color, shape)
    :return: float, context-weighted reward
    """
    # NOTE: the error metric here is MAE (via calculate_mae), not MSE as the
    # old local variable name suggested.
    mae = calculate_mae(arm_data, arm_labels)
    # The original dot product with an all-ones vector is just the feature
    # sum; written explicitly. Placeholder context weighting — TODO replace
    # with a learned/meaningful weighting if contextual rewards are used.
    contextual_factor = np.sum(context_features)
    # Epsilon guards against division by zero when the error is ~0.
    return 1 / (mae + 1e-5) * contextual_factor


def calculate_exploration_reward(arm_data, arm_labels, exploration_factor=0.1):
    """
    Compute an exploration-augmented reward for one arm.

    The base reward is the inverse of the arm's mean absolute error, plus an
    exploration bonus that shrinks as the error grows.

    :param arm_data: predictions for the current arm
    :param arm_labels: ground-truth labels for the current arm
    :param exploration_factor: float, weight of the exploration bonus
    :return: float, inverse-MAE reward plus exploration bonus
    """
    # NOTE: the error metric here is MAE (via calculate_mae), not MSE as the
    # old local variable name suggested.
    mae = calculate_mae(arm_data, arm_labels)
    exploration_bonus = exploration_factor / (1 + mae)
    # Epsilon guards against division by zero when the error is ~0.
    return 1 / (mae + 1e-5) + exploration_bonus


def save_optimal_arms_data(optimal_arms, train_set, out_path="optimal_arms_data.csv"):
    """
    Save the training data of the selected optimal dimensions to a CSV file.

    FIX: this function was called at the end of train_model_with_bandit but
    never defined anywhere in the file, so the script crashed with a
    NameError after training; it is defined here (the file already imports
    `csv` for exactly this purpose).

    :param optimal_arms: list of int, indices of the selected dimensions
    :param train_set: np.ndarray of shape (num_samples, num_dims)
    :param out_path: str, destination CSV path
    """
    with open(out_path, "w", newline="") as f:
        writer = csv.writer(f)
        # Header names each kept dimension by its original column index.
        writer.writerow([f"dim_{arm}" for arm in optimal_arms])
        writer.writerows(np.asarray(train_set)[:, list(optimal_arms)].tolist())


def train_model_with_bandit(opt):
    """
    Train a UCB multi-armed bandit that treats each aesthetic dimension as an
    arm. Each epoch visits every arm, scores it by the MSE between its train
    values and test labels, and rewards it with the inverse MSE; finally the
    top-10 dimensions are reported and their training data saved to CSV.

    :param opt: argparse.Namespace with attributes `epochs` and `photo_size`
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load the aesthetic-dimension matrices (samples x dimensions).
    train_set, test_set = load_aesthetic_data(opt.photo_size)

    # Keep only the first 861 training samples — presumably to match the
    # test-set sample count (TODO confirm against prepare_data).
    train_set = train_set[:861, :]

    # Sanity-print the matrix shapes to check sample counts agree.
    print(f"train_set size: {train_set.shape}")
    print(f"test_set size: {test_set.shape}")

    num_arms = train_set.shape[1]  # one arm per aesthetic dimension

    # Bandit over the dimensions, plus the MSE criterion used as the score.
    bandit_model = MultiArmBandit(num_arms=num_arms, method="UCB")
    criterion = F.mse_loss

    # Selection count per dimension (here every arm is visited each epoch).
    total_selections = np.zeros(num_arms)

    for epoch in range(opt.epochs):
        total_mse = []
        total_acc = []
        total_mae = []

        for arm in range(num_arms):
            # Column of the current dimension, as float tensors on `device`.
            arm_data = torch.tensor(train_set[:, arm]).float().to(device)
            arm_labels = torch.tensor(test_set[:, arm]).float().to(device)

            # Sample counts must agree for the element-wise loss below.
            assert arm_data.size(0) == arm_labels.size(0), f"样本数量不一致：arm_data({arm_data.size(0)}), arm_labels({arm_labels.size(0)})"

            # Expand each scalar into a 3x16x16 constant "image". Because the
            # value is constant per sample, bilinear interpolation leaves the
            # resulting MSE identical to that of the raw vectors.
            arm_data = arm_data.view(-1, 1, 1, 1).expand(-1, 3, -1, -1)
            arm_data = F.interpolate(arm_data, size=(16, 16), mode='bilinear', align_corners=False)

            # Broadcast the labels to the same shape as arm_data.
            arm_labels = arm_labels.unsqueeze(1).unsqueeze(2).unsqueeze(3)  # (batch_size, 1, 1, 1)
            arm_labels = arm_labels.expand_as(arm_data)

            # MSE between the arm's train values and its test labels.
            mse_loss = criterion(arm_data, arm_labels)

            mse = mse_loss.item()
            total_mse.append(mse)
            # Inverse-MSE reward; epsilon avoids division by zero (and caps
            # the reward when the MSE is tiny).
            reward = 1 / (mse + 1e-5)
            bandit_model.update(arm, reward)

            total_selections[arm] += 1

            # ACC / MAE on the numpy views of the expanded tensors.
            arm_preds = arm_data.squeeze().cpu().numpy()
            arm_labels = arm_labels.squeeze().cpu().numpy()

            acc = calculate_acc(arm_preds, arm_labels)
            mae = calculate_mae(arm_preds, arm_labels)

            total_acc.append(acc)
            total_mae.append(mae)

        # Per-epoch averages across all arms.
        avg_mse = np.mean(total_mse)
        avg_acc = np.mean(total_acc)
        avg_mae = np.mean(total_mae)

        print(f"Epoch {epoch + 1}/{opt.epochs}, Average MSE: {avg_mse}, Average ACC: {avg_acc}, Average MAE: {avg_mae}")

    # Average annotations needed per dimension. NOTE(review): every arm is
    # visited exactly once per epoch above, so this is always 1.0 per arm.
    avg_selections_per_arm = total_selections / opt.epochs
    print(f"每个美学维度平均需要被标注的次数：{avg_selections_per_arm}")

    # Pick and persist the best dimension subset.
    optimal_arms = bandit_model.select_optimal_arms(top_k=10)
    print(f"选出的最优维度集合（臂）: {optimal_arms}")
    save_optimal_arms_data(optimal_arms, train_set)



if __name__ == "__main__":
    # CLI entry point: parse hyper-parameters and launch bandit training.
    cli = argparse.ArgumentParser(description="训练CMAB模型")
    cli.add_argument("--epochs", type=int, default=100, help="训练的epochs数量")
    cli.add_argument("--photo_size", type=int, default=32, help="输入图像的尺寸")
    train_model_with_bandit(cli.parse_args())
