import pandas as pd
import torch
import torch.optim as optim
import random
import argparse
import numpy as np
import torch.nn.functional as F
from scipy.stats import pearsonr
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
import math
import os

from RL_model import RL_model
from prepare_data import get_RL_data, collate_RL
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def set_random_seeds(seed=42):
    """Seed every RNG (Python, NumPy, PyTorch CPU/CUDA) for reproducibility.

    Also forces cuDNN into deterministic mode and disables its auto-tuner so
    repeated runs on the same hardware produce identical results.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

def train_RL(model, train_loader, test_loader):
    """Pre-train the aesthetic sub-network with plain supervised regression,
    then print the model's metrics on the test set.

    :param model: wrapper exposing `.aes` (the network), `.opt` (hyper-params),
                  `.aes_loss_func1` (regression loss) and `.test(loader)`
    :param train_loader: yields (image, name, label) batches for training
    :param test_loader: forwarded unchanged to `model.test` for evaluation
    """
    model.aes.train()  # enable training-mode behaviour (dropout/batch-norm)
    optimizer = optim.Adam(model.aes.parameters(), lr=model.opt.lr)

    for _ in range(model.opt.model_epochs):
        running_loss = 0.0
        for img, name, labels in train_loader:
            # move the batch onto the compute device
            img = img.to(device)
            labels = labels.to(device)

            # forward pass + regression loss
            scores = model.aes(img)
            loss = model.aes_loss_func1(scores, labels)
            running_loss += loss.item()

            # standard backward/step cycle
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # Evaluation phase: unpack the metric tensors into plain Python floats.
    metrics = model.test(test_loader)
    mae, mse, rmse, r2, pea, mape, mpe = [m.item() for m in metrics]
    print(f'预训练测试： MAE:{mae} MSE:{mse} RMSE:{rmse} R²:{r2} PEA:{pea} MAPE:{mape} MPE:{mpe}')

def calculate_embedding_dim(opt):
    """Trace a photo's spatial size through conv1 -> pool -> conv2 -> pool
    and return the flattened feature dimension (side * side * 3 channels).

    Mirrors the CNN geometry configured by `opt`: both pooling stages are
    2x2 with stride 2; the `+ 2` in the conv2 step corresponds to padding 1.
    """
    side = (opt.photo_size - opt.Kernel_size1) // opt.Stride1 + 1   # conv1
    side = (side - 2) // 2 + 1                                      # pool1
    side = (side - opt.Kernel_size2 + 2) // opt.Stride2 + 1         # conv2 (pad=1)
    side = (side - 2) // 2 + 1                                      # pool2
    return side * side * 3

# ------------------------- Multi-Armed Bandit (MAB) -------------------------
class MultiArmedBanditUCB:
    """UCB1-style multi-armed bandit over output dimensions.

    Each "arm" is one output dimension; `select_dims` returns the subset of
    dimensions whose UCB score is at least the mean score, so the number of
    selected dimensions adapts over time instead of being fixed.
    """

    def __init__(self, num_arms, alpha=1.0):
        """
        :param num_arms: total number of dimensions (arms)
        :param alpha: UCB exploration coefficient balancing exploration/exploitation
        """
        self.num_arms = num_arms
        self.alpha = alpha
        self.counts = np.zeros(num_arms)   # times each dimension was selected
        self.values = np.zeros(num_arms)   # running mean reward per dimension
        self.total_t = 0                   # total number of selection rounds
        self.previous_selected_dims = []   # dimensions chosen on the previous round

    def select_dims(self):
        """Pick every dimension whose UCB score is >= the mean UCB score.

        Unvisited arms receive an infinite score so they are always explored.

        :return: (selected_dims, ucb_scores) — list of indices and the raw scores
        """
        self.total_t += 1
        t = self.total_t
        ucb_scores = np.zeros(self.num_arms)
        for i in range(self.num_arms):
            if self.counts[i] == 0:
                # never tried: force exploration
                ucb_scores[i] = float('inf')
            else:
                bonus = self.alpha * math.sqrt(math.log(t) / (self.counts[i]))
                ucb_scores[i] = self.values[i] + bonus

        finite_scores = ucb_scores[np.isfinite(ucb_scores)]
        if finite_scores.size == 0:
            # BUGFIX: previously np.mean([]) returned NaN here (with a
            # RuntimeWarning), every `inf >= nan` comparison was False, and
            # only arm 0 was returned on the very first round. When no arm
            # has been visited yet they are all tied at +inf, so select all.
            return list(range(self.num_arms)), ucb_scores

        avg_ucb = np.mean(finite_scores)
        selected_dims = [i for i in range(self.num_arms) if ucb_scores[i] >= avg_ucb]
        if not selected_dims:
            # Degenerate fallback: always return at least the best arm.
            selected_dims = [int(np.argmax(ucb_scores))]
        return selected_dims, ucb_scores

    def update(self, rewards_dict):
        """Incrementally update the mean reward of each rewarded arm.

        :param rewards_dict: dict mapping dimension index -> observed reward
        """
        for arm, reward in rewards_dict.items():
            self.counts[arm] += 1
            n = self.counts[arm]
            # incremental mean: new = old + (reward - old) / n
            self.values[arm] += (reward - self.values[arm]) / n

# NOTE: duplicate `import numpy as np` and `import torch` removed — both
# modules are already imported at the top of this file.

def compute_scalar_reward(y_hat, labels, selected_dims_t, selected_dims_t_plus_1, model, lambda_acc=1.0, mu=0.1):
    """Combine accuracy, GAT correlation and a size penalty into one scalar.

        reward = lambda_acc * R_acc + R_rel - mu * |selected_dims_t_plus_1|

    R_acc is the mean squared prediction error over the currently selected
    dimensions; R_rel is the mean pairwise Pearson correlation of their GAT
    representations. Returns 0.0 when neither the previous nor the current
    step selected any dimension.

    :param y_hat: model predictions for the current sample (1-D tensor)
    :param labels: ground-truth labels (1-D tensor)
    :param selected_dims_t: dimensions chosen at step t (previous action)
    :param selected_dims_t_plus_1: dimensions chosen at step t+1 (current action)
    :param model: model exposing `.GAT`, `.state_embed` and
                  `.dimension_adj_martix`; only touched when >= 2 dims are selected
    :param lambda_acc: weight of the accuracy reward
    :param mu: weight of the per-dimension penalty term
    :return: scalar reward value
    """
    # No selection at either step -> neutral reward.
    if not selected_dims_t and not selected_dims_t_plus_1:
        return 0.0

    # R_acc: mean squared error restricted to the currently selected dims.
    if selected_dims_t_plus_1:
        squared_errors = [(y_hat[d] - labels[d]).item() ** 2 for d in selected_dims_t_plus_1]
        R_acc = np.mean(squared_errors)
    else:
        R_acc = 0.0

    # R_rel: average pairwise Pearson correlation of GAT node embeddings.
    R_rel = 0.0
    if len(selected_dims_t_plus_1) > 1:  # correlations need at least two dims
        with torch.no_grad():
            gat_repr = model.GAT(model.state_embed, model.dimension_adj_martix).detach().cpu().numpy()

        correlations = []
        for a in selected_dims_t_plus_1:
            for b in selected_dims_t_plus_1:
                if a == b:
                    continue
                corr = np.corrcoef(gat_repr[a], gat_repr[b])[0, 1]  # Pearson coefficient
                if not np.isnan(corr):
                    correlations.append(corr)
        if correlations:
            R_rel = np.mean(correlations)

    # Penalise selecting many dimensions (annotation cost).
    penalty = mu * len(selected_dims_t_plus_1)

    return lambda_acc * R_acc + R_rel - penalty


# Bandit-driven dimension selection: pick dims via the MAB strategy and,
# during training, update its statistics with a single scalar reward.
def multi_arm_bandit(y_hat, labels, t, bandit, model, is_training=True):
    """Select dimensions via the UCB bandit; during training also update it.

    :param y_hat: predictions for one sample (1-D tensor)
    :param labels: ground-truth labels for that sample (1-D tensor)
    :param t: current step index (kept for interface compatibility)
    :param bandit: MultiArmedBanditUCB holding the selection statistics
    :param model: passed through to the reward computation
    :param is_training: when False, only dimension selection is performed
    :return: list of selected dimension indices
    """
    if not is_training:
        # Inference: just ask the bandit which dimensions to pick.
        selected_dims, _ = bandit.select_dims()
        return selected_dims

    # Diagnostic MSE before simulated annotation (currently log-only).
    mse_before = torch.mean((y_hat - labels) ** 2).item()

    # Current round's selection.
    selected_dims, ucb_scores = bandit.select_dims()

    # Simulate human annotation: overwrite predictions on the chosen dims
    # with the true labels, then measure the (diagnostic) post-fix MSE.
    y_hat_after = y_hat.clone()
    for dim in selected_dims:
        y_hat_after[dim] = labels[dim]
    mse_after = torch.mean((y_hat_after - labels) ** 2).item()

    # One scalar reward covering the previous and current selections.
    reward_scalar = compute_scalar_reward(
        y_hat,
        labels,
        bandit.previous_selected_dims,  # action at step t
        selected_dims,                  # action at step t+1
        model,
        lambda_acc=1.0,
        mu=0.1,
    )

    # Broadcast the same reward to every arm.
    bandit.update({arm: reward_scalar for arm in range(bandit.num_arms)})

    # Remember this round's choice for the next reward computation.
    bandit.previous_selected_dims = selected_dims

    return selected_dims

def update_submodels(model, y_double_hat, labels, difficult_dims_list):
    """Fine-tune the per-dimension sub-models on the "difficult" dimensions.

    For every dimension appearing in any sample's difficult-dims list, a
    fresh forward pass is run on `model.aes.last_input`, the loss is
    restricted to that dimension via a 0/1 mask, and only that dimension's
    sub-model parameters are stepped.

    :param model: wrapper exposing `.aes` (with `.last_input` and `.submodels`)
                  and `.opt.lr` — assumes `.aes.last_input` still holds the
                  batch that produced `y_double_hat` (TODO confirm in RL_model)
    :param y_double_hat: (batch, num_dims) predictions; used only to shape the mask
    :param labels: (batch, num_dims) ground-truth labels
    :param difficult_dims_list: per-sample lists of difficult dimension indices
    :return: mean per-dimension sub-model loss, or 0 when nothing to update
    """
    total_submodel_loss = 0
    
    # Collect the union of all dimensions that need an update
    all_dims = set()
    for dims in difficult_dims_list:
        all_dims.update(dims)
    
    if not all_dims:  # nothing flagged difficult: return immediately
        return 0
    
    # Update each difficult dimension separately
    for dim in all_dims:
        # Build a batch mask that is 1 only for samples where `dim` was flagged
        dim_mask = torch.zeros_like(y_double_hat).to(device)
        for i, difficult_dims in enumerate(difficult_dims_list):
            if dim in difficult_dims:
                dim_mask[i, dim] = 1
        
        # Use a fresh computation graph for this dimension's loss
        with torch.enable_grad():
            # Re-run the forward pass; NOTE(review): this recomputes the full
            # network once per difficult dimension (O(|dims|) forward passes)
            new_output = model.aes(model.aes.last_input)
            dim_output = new_output[:, dim:dim+1]  # keep only this dimension's output
            dim_labels = labels[:, dim:dim+1]      # and the matching label column
            
            # Zero out samples where this dimension was not flagged difficult;
            # NOTE(review): unflagged rows still contribute (0,0) pairs to the
            # mean, which dilutes the loss — confirm this is intended
            masked_output = dim_output * dim_mask[:, dim:dim+1]
            masked_labels = dim_labels * dim_mask[:, dim:dim+1]
            
            # Loss for this single dimension
            dim_loss = calculate_loss(masked_output, masked_labels)
            total_submodel_loss += dim_loss.item()
            
            # Dedicated optimizer for this dimension's sub-model.
            # NOTE(review): a new Adam is created on every call, so optimizer
            # state (momentum) is not carried across updates — confirm intent
            optimizer = optim.Adam(model.aes.submodels[dim].parameters(), lr=model.opt.lr)
            
            # Backprop and step only this sub-model's parameters
            optimizer.zero_grad()
            dim_loss.backward()
            optimizer.step()
    
    return total_submodel_loss / len(all_dims)  # average loss over updated dims

def train_aesthetic_model(opt):
    """End-to-end pipeline: (1) pre-train on 100 images, (2) warm up the UCB
    bandit on 100 fresh images, (3) run 50 sampling rounds of joint
    main-model + sub-model training, then save test metrics to Excel.

    NOTE(review): assumes `train_dataset` behaves like a sequence of
    (image_tensor, name, label) tuples — it is passed to `random.sample`
    and iterated directly; confirm against prepare_data.get_RL_data.
    """
    set_random_seeds(42)
    # 1. Load datasets and wrap them in loaders
    train_dataset, test_dataset = get_RL_data(opt.photo_size, opt.data_path)
    train_loader = DataLoader(train_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)

    # Build the model; embedding_dim is derived from the CNN geometry
    embedding_dim = calculate_embedding_dim(opt)
    aes_model = RL_model(opt, embedding_dim).to(device)

    # Step 1: pre-training on a first batch of 100 randomly sampled images
    first_batch = random.sample(train_dataset, 100)
    first_images, first_names, first_labels = zip(*first_batch)
    first_images = torch.stack(first_images).to(device)
    first_labels = torch.stack([torch.tensor(label).to(device) for label in first_labels])
    pretrain_loader = DataLoader(list(zip(first_images, first_names, first_labels)),
                                 batch_size=opt.batchSize,
                                 shuffle=True)
    train_RL(aes_model, pretrain_loader, test_loader)

    # Remember which images have been consumed (keyed by image name)
    used_samples = set(first_names)

    # Step 2: bandit warm-up on a second batch of 100 previously unused images
    second_batch = []
    for item in train_dataset:
        if item[1] not in used_samples and len(second_batch) < 100:
            second_batch.append(item)
            used_samples.add(item[1])
    second_images, second_names, second_labels = zip(*second_batch)
    second_images = torch.stack(second_images).to(device)
    second_labels = torch.stack([torch.tensor(label).to(device) for label in second_labels])
    with torch.no_grad():
        predictions = aes_model.aes(second_images)
    
    # Initialise the multi-armed bandit; one arm per output dimension
    bandit = MultiArmedBanditUCB(num_arms=opt.num_classes, alpha=0.5)

    # One bandit selection/update round per sample of the second batch
    for i in range(second_images.size(0)):
        _ = multi_arm_bandit(predictions[i], second_labels[i], i, bandit, aes_model, is_training=True)

    # Step 3: 50 sampling rounds, 100 images each (20 reused + up to 80 new)
    all_test_results = []
    for sample_idx in range(50):
        print(f"开始第 {sample_idx + 1}/50 次采样训练")
        current_batch = []
        used_samples_list = list(used_samples)
        selected_used = random.sample(used_samples_list, 20)
        # collect the 20 re-used images...
        for item in train_dataset:
            if item[1] in selected_used:
                current_batch.append(item)
        # ...then top the batch up to 100 with fresh, unused images
        for item in train_dataset:
            if item[1] not in used_samples and len(current_batch) < 100:
                current_batch.append(item)
                used_samples.add(item[1])
        images, names, labels = zip(*current_batch)
        images = torch.stack(images).to(device)
        labels = torch.stack([torch.tensor(label).to(device) for label in labels])
        for epoch in range(20):
            total_loss = 0
            total_difficult_dims = 0
            y_hat = aes_model.aes(images)
            y_double_hat = y_hat.clone()
            difficult_dims_list = []
            # per-sample bandit pass to mark "difficult" dimensions
            for i in range(y_double_hat.size(0)):
                t = sample_idx * 20 + epoch
                difficult_dims = multi_arm_bandit(y_hat[i], labels[i], t, bandit, aes_model, is_training=True)
                difficult_dims_list.append(difficult_dims)
            # main-network update on the full prediction matrix
            loss = calculate_loss(y_double_hat, labels)
            total_loss += loss.item()
            # NOTE(review): a fresh Adam is created every epoch, so optimizer
            # state is not carried across steps — confirm this is intended
            optimizer = optim.Adam(aes_model.aes.parameters(), lr=opt.lr)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # per-dimension sub-model refinement on the difficult dims
            submodel_loss = update_submodels(aes_model, y_double_hat, labels, difficult_dims_list)
            total_loss += submodel_loss
            total_difficult_dims += sum(len(d) for d in difficult_dims_list)
            if epoch == 19:  # report once per sampling round, after the last epoch
                mae, mse, rmse, r2 = calculate_metrics(y_double_hat, labels)
                avg_difficult_dims = total_difficult_dims / 100.0
                print(f'采样 [{sample_idx + 1}/50], MSE: {mse:.4f}, MAE: {mae:.4f}, '
                      f'RMSE: {rmse:.4f}, R²: {r2:.4f}, Avg. Difficult Dims: {avg_difficult_dims:.2f}, '
                      f'Submodel Loss: {submodel_loss:.4f}')
                test_results = evaluate_on_test_set(aes_model, test_loader)
                all_test_results.append(test_results)
                print(f'测试结果 - MAE: {test_results["MAE"]:.4f}, MSE: {test_results["MSE"]:.4f}, '
                      f'RMSE: {test_results["RMSE"]:.4f}, R²: {test_results["R2"]:.4f}')
    save_results_to_excel(all_test_results)

def evaluate_on_test_set(model, test_loader):
    """Run the aesthetic network over the test set and return regression metrics.

    Temporarily switches the model to eval mode and restores the previous
    mode afterwards, so interleaved training (as in train_aesthetic_model)
    is not silently left running with eval-mode dropout/batch-norm.

    :param model: wrapper module exposing `.aes` for the forward pass
    :param test_loader: yields (image, name, label) batches
    :return: dict with 'MAE', 'MSE', 'RMSE', 'R2' float values
    """
    was_training = model.training
    model.eval()
    all_preds, all_labels = [], []
    try:
        with torch.no_grad():
            for img, name, labels in test_loader:
                img = img.to(device)
                labels = labels.to(device)
                preds = model.aes(img)
                all_preds.append(preds.cpu())
                all_labels.append(labels.cpu())
    finally:
        # BUGFIX: previously the model was permanently left in eval mode, so
        # all subsequent training steps ran with eval-mode dropout/batch-norm.
        if was_training:
            model.train()
    all_preds = torch.cat(all_preds, dim=0)
    all_labels = torch.cat(all_labels, dim=0)
    mae, mse, rmse, r2 = calculate_metrics(all_preds, all_labels)
    return {'MAE': mae, 'MSE': mse, 'RMSE': rmse, 'R2': r2}

def save_results_to_excel(results, filename='test_results.xlsx'):
    """Dump one result dict or a list of result dicts to an Excel file.

    Write errors are reported on stdout but not raised (best-effort save).
    """
    rows = results if isinstance(results, list) else [results]
    frame = pd.DataFrame(rows)
    try:
        frame.to_excel(filename, index=False, header=True)
    except Exception as e:
        print(f"Error while saving to file: {e}")

def calculate_metrics(y_double_hat, labels):
    """Return (MAE, MSE, RMSE, R^2) as Python floats for the given tensors."""
    diff = y_double_hat - labels
    mae = torch.abs(diff).mean().item()
    mse_t = (diff ** 2).mean()
    rmse_t = mse_t.sqrt()
    # R^2 = 1 - SS_res / SS_tot
    ss_res = torch.sum(diff ** 2)
    ss_tot = torch.sum((labels - labels.mean()) ** 2)
    r2 = 1 - (ss_res / ss_tot).item()
    return mae, mse_t.item(), rmse_t.item(), r2

def calculate_loss(y_double_hat, labels):
    """Mean-squared-error loss between predictions and ground-truth labels."""
    return torch.nn.functional.mse_loss(y_double_hat, labels)

if __name__ == "__main__":
    # Command-line configuration. NOTE(review): many of these options are not
    # referenced in this file and are presumably consumed by RL_model /
    # prepare_data — verify before removing any.
    parser = argparse.ArgumentParser()
    # --- batching / optimisation ---
    parser.add_argument('--batchSize', type=int, default=20, help='input batch size')
    parser.add_argument('--sample_size', type=int, default=20, help='sample size')
    parser.add_argument('--hidden_size', type=int, default=200, help='hidden state size')
    parser.add_argument('--hidden_dim', type=int, default=100, help='hidden state size')
    parser.add_argument('--epochs', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--AL_epochs', type=int, default=5, help='number of epochs for AL training')
    parser.add_argument('--query_num', type=int, default=100, help='query number')
    parser.add_argument('--model_epochs', type=int, default=20, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    # --- network architecture (used by RL_model / calculate_embedding_dim) ---
    parser.add_argument('--num_layers', type=int, default=6, help='number of layers')
    parser.add_argument('--num_heads', type=int, default=6, help='number of attention heads')
    parser.add_argument('--mlp_ratio', type=float, default=1, help='ratio for hidden layers')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='first conv kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='second conv kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='first conv stride')
    parser.add_argument('--Stride2', type=int, default=2, help='second conv stride')
    parser.add_argument('--num_classes', type=int, default=77, help='number of classes/dimensions')
    parser.add_argument('--photo_size', type=int, default=128, help='photo size')
    parser.add_argument('--Linear_nums', type=int, default=3, help='number of linear layers')
    parser.add_argument('--data_path', type=str, default='./data', help='data path')
    # --- RL / bandit hyper-parameters ---
    parser.add_argument('--agent_nums', type=int, default=77, help='number of agents')
    parser.add_argument('--gamma', type=float, default=0.99, help='gamma')
    parser.add_argument('--epsilon', type=float, default=0.8, help='epsilon')
    parser.add_argument('--target_update_nums', type=int, default=4, help='target update numbers')
    parser.add_argument('--ReplayBuffer_capacity', type=int, default=80, help='ReplayBuffer capacity')
    parser.add_argument('--min_size', type=int, default=60, help='minimum size')
    parser.add_argument('--path_len', type=int, default=5, help='path length')
    parser.add_argument('--D', type=float, default=1.7, help='D parameter')
    parser.add_argument('--a', type=int, default=5, help='a parameter')
    # --- misc model / loss settings and feature flags ---
    parser.add_argument('--model_name', type=str, default='Aes', help='model name')
    parser.add_argument('--mu', type=float, default=1, help='mu parameter')
    parser.add_argument('--embedding_dim', type=int, default=200, help='embedding dimension')
    parser.add_argument('--msepara', type=int, default=1, help='mse parameter')
    parser.add_argument('--maepara', type=int, default=100, help='mae parameter')
    parser.add_argument('--expand_name', type=str, default='embedding_dim', help='expand name')
    # NOTE(review): the following flags are strings ('true'/'false'), not
    # booleans — downstream code presumably compares against the literal text
    parser.add_argument('--is_adjust_parameter', type=str, default='false', help='adjust parameter flag')
    parser.add_argument('--CNN_USE_TRANSFORM', type=str, default='true', help='CNN use transform flag')
    parser.add_argument('--Submodel_USE_TRANSFORM', type=str, default='true', help='Submodel use transform flag')
    parser.add_argument('--Save_data', type=str, default='false', help='save data flag')
    parser.add_argument('--Set_data', type=str, default='false', help='set data flag')
    parser.add_argument('--Save_dimension_chosen_nums', type=str, default='false', help='save dimension chosen flag')
    parser.add_argument('--state_type', type=str, default='0', help='state type')
    parser.add_argument('--reward_type', type=str, default='0', help='reward type')
    parser.add_argument('--singleCapacity', type=int, default=200, help='single capacity')
    parser.add_argument('--FocalLoss_alpha', type=float, default=0.75, help='FocalLoss alpha')
    parser.add_argument('--FocalLoss_gamma', type=float, default=3, help='FocalLoss gamma')
    parser.add_argument('--OUTPUT_TENSORS', type=str, default='True', help='output tensors flag')

    opt = parser.parse_args()
    train_aesthetic_model(opt)
