import pandas as pd
import torch
import torch.optim as optim
import random
import argparse
from RL_model import RL_model
from prepare_data import get_RL_data, collate_RL
import numpy as np
import torch.nn.functional as F
from scipy.stats import pearsonr
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import argparse
from RL_model import RL_model
from prepare_data import get_RL_data, collate_RL
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
import random
import argparse
import torch.nn.functional as F
from RL_model import RL_model
from scipy.stats import pearsonr  # 添加相关性计算的库
import numpy as np
# get_RL_data is assumed to load and split the dataset
from prepare_data import get_RL_data, collate_RL
import os

def set_random_seeds(seed=42):
    """Seed every RNG used by this script (python, numpy, torch, CUDA).

    When CUDA is available, cuDNN is additionally forced into
    deterministic mode so repeated runs produce identical results.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

def train_RL(model, train_loader, test_loader):
    """Pre-train the aesthetic sub-network (model.aes) and print test metrics.

    Runs ``model.opt.model_epochs`` epochs of supervised training on
    ``train_loader`` using the model-defined loss, then evaluates once on
    ``test_loader`` via ``model.test``.

    NOTE(review): requires a CUDA device — batches are moved with .cuda().
    """
    model.aes.train()  # put the aesthetic network in training mode
    optimizer = optim.Adam(model.aes.parameters(), lr=model.opt.lr)  # fresh optimizer for this pretraining run

    for epoch in range(model.opt.model_epochs):  # run the configured number of epochs
        total_loss = 0  # accumulated scalar loss over this epoch
        for img, name, labels in train_loader:  # iterate training batches (image, name, label)
            img = img.cuda()  # move images to GPU
            labels = labels.cuda()  # move labels to GPU

            # forward pass
            scores = model.aes(img)  # predictions for this batch

            # loss computation (loss function is defined on the model object)
            loss = model.aes_loss_func1(scores, labels)
            total_loss += loss.item()  # accumulate for reporting

            optimizer.zero_grad()  # clear old gradients
            loss.backward()  # backpropagate
            optimizer.step()  # update parameters

        # # per-epoch metric reporting (disabled)
        # mae, mse, rmse, r2 = calculate_metrics(scores, labels)
        # print(f'Epoch [{epoch + 1}/{model.opt.model_epochs}], Loss: {total_loss / len(train_loader):.4f}, MAE: {mae:.4f}, MSE: {mse:.4f}, RMSE: {rmse:.4f}, R²: {r2:.4f}')

    # evaluate once on the held-out test set; model.test returns tensor metrics
    mae, mse, rmse, r2, pea, mape, mpe=model.test(test_loader)
    mae, mse, rmse, r2, pea, mape, mpe = [metric.item() for metric in (mae, mse, rmse, r2, pea, mape, mpe)]

    print(f'预训练测试： MAE:{mae} MSE:{mse} RMSE:{rmse} R²:{r2} PEA:{pea} MAPE:{mape} MPE:{mpe}')

def calculate_embedding_dim(opt):
    """Compute the flattened feature size after two conv+pool stages.

    Mirrors the geometry of the aesthetic CNN: conv1 (Kernel_size1/Stride1),
    a fixed 2x2 stride-2 pool, conv2 (Kernel_size2/Stride2 with padding 1),
    another 2x2 stride-2 pool, with 3 channels in the final map.
    """
    size = (opt.photo_size - opt.Kernel_size1) // opt.Stride1 + 1   # conv 1
    size = (size - 2) // 2 + 1                                      # pool 1
    size = (size - opt.Kernel_size2 + 2) // opt.Stride2 + 1         # conv 2 (padding 1)
    size = (size - 2) // 2 + 1                                      # pool 2
    return size * size * 3

import math

# Multi-armed bandit that decides which score dimensions get manual labels
class MultiArmedBanditUCB:
    """UCB-style bandit that picks which score dimensions to relabel by hand.

    Each arm is one output dimension. Arm values are learned from a reward
    combining the MSE improvement obtained by substituting ground truth on
    the chosen dimensions, a penalty on how many dimensions were chosen,
    and a redundancy penalty when the chosen dimensions are highly similar.
    """

    def __init__(self, num_arms, alpha=0.5, lambda_acc=1.0, mu_pen=0.1):
        self.num_arms = num_arms
        self.alpha = alpha            # exploration weight in the UCB term
        self.lambda_acc = lambda_acc  # weight of the accuracy-gain reward
        self.mu_pen = mu_pen          # weight of the selection-size penalty

        self.counts = np.zeros(num_arms)   # times each arm took part in an update
        self.values = np.zeros(num_arms)   # learned value estimate per arm
        self.previous_selections = set()   # arms chosen at the previous step
        self.current_selections = set()    # arms chosen at the current step

    def select_arm(self, t):
        """Pick arms to relabel at step t (training-time, exploratory).

        Uses a damped UCB score (no factor of 2 on the exploration term)
        and keeps only arms above the 85th percentile, capped at five, to
        limit over-exploration.
        """
        self.previous_selections = self.current_selections.copy()

        # damped UCB score: value estimate plus a shrunken exploration bonus
        exploration = np.sqrt(np.log(t + 1) / (self.counts + 1e-6))
        ucb_values = self.values + self.alpha * exploration

        if t < self.num_arms // 2:
            # short warm-up phase: deterministically visit arm t
            chosen = [t]
        else:
            ranked = np.argsort(ucb_values)[::-1]
            cutoff = np.percentile(ucb_values, 85)  # strict selection threshold
            chosen = []
            for arm in ranked:
                if ucb_values[arm] > cutoff:
                    chosen.append(arm)
                if len(chosen) >= 5:  # hard cap on how many arms get picked
                    break
            if not chosen:
                chosen = [ranked[0]]  # always select at least one dimension

        self.current_selections = set(chosen)
        return chosen

    def predict(self, values):
        """Test-time selection: exploit learned arm values only.

        Returns the arms whose learned value exceeds the mean value,
        falling back to the single best arm when none qualifies.
        """
        ranked = np.argsort(self.values)[::-1]
        baseline = np.mean(self.values)
        chosen = []
        for arm in ranked:
            if self.values[arm] <= baseline:
                break
            chosen.append(arm)
        return chosen if chosen else [ranked[0]]

    def update(self, chosen_arms, mse_before, mse_after, gat_representations):
        """Share this step's reward among all arms whose selection changed or persisted."""
        # accuracy-gain reward: how much relabeling reduced the MSE
        R_acc = mse_before - mse_after

        # mean pairwise similarity of the chosen dimensions' predictions
        R_rel = 0
        if len(chosen_arms) > 1:
            arms = list(chosen_arms)
            sims = []
            for i, a in enumerate(arms):
                for b in arms[i + 1:]:
                    va = gat_representations[a]
                    vb = gat_representations[b]
                    sims.append(1 - abs(va - vb) / (abs(va) + abs(vb) + 1e-6))
            if sims:
                R_rel = np.mean(sims)

        # base reward: accuracy gain minus a penalty on selection size
        R_base = self.lambda_acc * R_acc - self.mu_pen * (len(chosen_arms) / self.num_arms)

        # arms participate if they were kept, newly picked, or dropped
        both_selected = self.previous_selections & self.current_selections
        newly_selected = self.current_selections - self.previous_selections
        deselected = self.previous_selections - self.current_selections
        participating_arms = both_selected | newly_selected | deselected

        if not participating_arms:
            return

        reward_per_arm = R_base / len(participating_arms)
        for arm in participating_arms:
            self.counts[arm] += 1

            # weight the shared reward by how the arm participated
            if arm in both_selected:
                adjustment = 1.2   # kept across steps: bonus
            elif arm in newly_selected:
                adjustment = 1.0   # newly picked: standard
            else:
                adjustment = 0.8   # dropped this step: discount

            # penalize currently selected arms when choices are redundant
            if arm in self.current_selections and R_rel > 0.8:
                correlation_penalty = -0.2 * R_rel
            else:
                correlation_penalty = 0

            final_reward = reward_per_arm * adjustment + correlation_penalty

            # incremental-mean update of the arm's value estimate
            lr = 1.0 / self.counts[arm]
            self.values[arm] = (1 - lr) * self.values[arm] + lr * final_reward

# multi_arm_bandit uses the model predictions as the per-dimension (GAT) representation
def multi_arm_bandit(y_hat, labels, t, bandit, is_training=True):
    """Select the dimensions of one sample to be manually annotated.

    Training mode: asks the bandit for arms, measures the MSE drop obtained
    by substituting ground truth on those dimensions, and feeds that back
    into the bandit. Test mode: only queries the bandit's learned values.

    Returns the list of selected dimension indices.
    """
    if not is_training:
        # inference: pure exploitation of the learned arm values
        return bandit.predict(y_hat.detach().cpu().numpy().reshape(-1))

    # MSE of the raw prediction for this sample
    mse_before = torch.mean((y_hat - labels) ** 2).item()
    difficult_dims = bandit.select_arm(t)

    # substitute ground truth on the chosen dimensions and re-measure
    corrected = y_hat.clone()
    for dim in difficult_dims:
        corrected[dim] = labels[dim]
    mse_after = torch.mean((corrected - labels) ** 2).item()

    # the prediction vector itself serves as the per-dimension representation
    representations = y_hat.detach().cpu().numpy().reshape(-1)
    bandit.update(difficult_dims, mse_before, mse_after, representations)
    return difficult_dims

def update_submodels(model, y_double_hat, labels, difficult_dims_list):
    """Accumulate the masked loss over each sample's "difficult" dimensions.

    For sample i, only the dimensions flagged for manual annotation
    contribute: a binary mask zeroes out every other dimension before the
    loss is computed. Returns the summed scalar loss.

    NOTE(review): `model` is currently unused — the per-submodel parameter
    update hook is not implemented yet.
    """
    total = 0
    for i, dims in enumerate(difficult_dims_list):
        # binary mask: 1 on dimensions needing manual labels, 0 elsewhere
        mask = torch.zeros_like(y_double_hat[i])
        mask[dims] = 1

        # loss restricted (via the mask) to the flagged dimensions
        masked_loss = calculate_loss(y_double_hat[i] * mask, labels[i] * mask)
        total += masked_loss.item()

        # submodel update logic would go here, e.g. model.submodel.update(...)

    return total

def train_aesthetic_model(opt):
    """End-to-end active-learning training pipeline for the aesthetic model.

    Stage 1: pretrain the CNN on a first batch of 100 images.
    Stage 2: warm up the UCB bandit on a second batch of 100 unseen images.
    Stage 3: 50 sampling rounds of 100 images each (20 reused + 80 new),
    20 epochs per round, evaluating on the test set after every round;
    per-round results are saved to Excel at the end.

    NOTE(review): requires a CUDA device — tensors are moved with .cuda().
    """
    # fix all RNG seeds for reproducibility
    set_random_seeds(42)
    
    # 1. load the dataset and split into train / test sets
    train_dataset, test_dataset = get_RL_data(opt.photo_size, opt.data_path)
    
    # build data loaders (note: no shuffle here)
    train_loader = DataLoader(train_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=opt.batchSize, collate_fn=collate_RL, pin_memory=True)

    # initialize the model with the CNN's flattened feature size
    embedding_dim = calculate_embedding_dim(opt)
    aes_model = RL_model(opt, embedding_dim).cuda()
    
    # Stage 1: pretraining on a first batch of 100 images
    # NOTE(review): random.sample over train_dataset assumes it behaves like a sequence
    first_batch = random.sample(train_dataset, 100)
    first_images, first_names, first_labels = zip(*first_batch)
    first_images = torch.stack(first_images).cuda()
    first_labels = torch.stack([torch.tensor(label).cuda() for label in first_labels])
    
    # pretrain on this batch and report test metrics
    pretrain_loader = DataLoader(list(zip(first_images, first_names, first_labels)), 
                               batch_size=opt.batchSize, 
                               shuffle=True)
    train_RL(aes_model, pretrain_loader, test_loader)
    
    # track which images have been used, keyed by image name
    used_samples = set(first_names)
    
    # Stage 2: bandit warm-up on a second batch of 100 unseen images
    second_batch = []
    for item in train_dataset:
        if item[1] not in used_samples and len(second_batch) < 100:  # item[1] is the image name
            second_batch.append(item)
            used_samples.add(item[1])
    
    second_images, second_names, second_labels = zip(*second_batch)
    second_images = torch.stack(second_images).cuda()
    second_labels = torch.stack([torch.tensor(label).cuda() for label in second_labels])
    
    # model predictions for the warm-up batch (no gradients needed)
    with torch.no_grad():
        predictions = aes_model.aes(second_images)
    
    # initialize the UCB bandit; the smaller alpha damps exploration
    bandit = MultiArmedBanditUCB(num_arms=opt.num_classes, 
                                alpha=0.5,  # lowered from 1.0 to 0.5
                                lambda_acc=1.0,
                                mu_pen=0.1)
    
    # warm up the bandit, one sample per time step
    for i in range(second_images.size(0)):
        difficult_dims = multi_arm_bandit(predictions[i], 
                                        second_labels[i], 
                                        i, 
                                        bandit)
    
    # Stage 3: 50 sampling rounds of 100 images each (20 reused + 80 new)
    all_test_results = []
    
    for sample_idx in range(50):
        print(f"开始第 {sample_idx + 1}/50 次采样训练")
        
        # assemble this round's batch: 20 previously used images + 80 new ones
        current_batch = []
        
        # pick 20 already-used image names at random
        used_samples_list = list(used_samples)
        selected_used = random.sample(used_samples_list, 20)
        for item in train_dataset:
            if item[1] in selected_used:
                current_batch.append(item)
        
        # fill the batch up to 100 with unseen images
        for item in train_dataset:
            if item[1] not in used_samples and len(current_batch) < 100:
                current_batch.append(item)
                used_samples.add(item[1])
        
        # tensorize this round's batch
        images, names, labels = zip(*current_batch)
        images = torch.stack(images).cuda()
        labels = torch.stack([torch.tensor(label).cuda() for label in labels])
        
        # train for 20 epochs on this sampled batch
        for epoch in range(20):
            total_loss = 0
            total_difficult_dims = 0

            # forward pass through the aesthetic CNN
            y_hat = aes_model.aes(images)
            # NOTE(review): y_double_hat is never overwritten with labels on the
            # selected dimensions, so the loss below is plain MSE on predictions
            # — confirm this is intended
            y_double_hat = y_hat.clone()
            difficult_dims_list = []

            for i in range(y_double_hat.size(0)):
                # global bandit time step across rounds and epochs
                t = sample_idx * 20 + epoch
                difficult_dims = multi_arm_bandit(y_hat[i], 
                                                labels[i], 
                                                t,
                                                bandit)
                difficult_dims_list.append(difficult_dims)
            
            loss = calculate_loss(y_double_hat, labels)
            total_loss += loss.item()

            # NOTE(review): a fresh Adam is constructed every epoch, so optimizer
            # state (moment estimates) never accumulates — confirm this is intended
            optimizer = optim.Adam(aes_model.aes.parameters(), lr=opt.lr)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # masked loss over the bandit-selected dimensions (reporting only)
            submodel_loss = update_submodels(aes_model, y_double_hat, labels, difficult_dims_list)
            total_loss += submodel_loss

            total_difficult_dims += sum(len(d) for d in difficult_dims_list)

            # report training metrics only on the final epoch of the round
            if epoch == 19:
                mae, mse, rmse, r2 = calculate_metrics(y_double_hat, labels)
                avg_difficult_dims = total_difficult_dims / 100  # 100 images per sampling round
                print(f'采样 [{sample_idx + 1}/50], MSE: {mse:.4f}, MAE: {mae:.4f}, RMSE: {rmse:.4f}, R²: {r2:.4f}, Avg. Difficult Dims: {avg_difficult_dims:.2f}')

                # evaluate on the held-out test set once the round finishes
                test_results = evaluate_on_test_set(aes_model, test_loader)
                all_test_results.append(test_results)
                print(f'测试结果 - MAE: {test_results["MAE"]:.4f}, MSE: {test_results["MSE"]:.4f}, RMSE: {test_results["RMSE"]:.4f}, R²: {test_results["R2"]:.4f}')

    # persist per-round test metrics
    save_results_to_excel(all_test_results)

def evaluate_on_test_set(model, test_loader):
    """Run the aesthetic network over the test loader and return its metrics.

    Returns a dict with keys 'MAE', 'MSE', 'RMSE', 'R2' computed over the
    concatenated predictions of the whole test set.

    NOTE: batches are moved to GPU, so a CUDA device is required.
    """
    model.eval()  # inference mode (disables dropout / batch-norm updates)
    pred_batches = []
    label_batches = []

    with torch.no_grad():
        for img, name, labels in test_loader:
            out = model.aes(img.cuda())
            pred_batches.append(out.cpu())
            label_batches.append(labels.cuda().cpu())

    predictions = torch.cat(pred_batches, dim=0)
    targets = torch.cat(label_batches, dim=0)

    mae, mse, rmse, r2 = calculate_metrics(predictions, targets)
    return {'MAE': mae, 'MSE': mse, 'RMSE': rmse, 'R2': r2}

import pandas as pd

def save_results_to_excel(results, filename='test_results.xlsx'):
    """Persist test metrics to an Excel file, overwriting any existing file.

    Accepts either a single result dict or a list of per-round dicts.
    Write failures are reported on stdout rather than raised.
    """
    frame = pd.DataFrame(results if isinstance(results, list) else [results])
    try:
        frame.to_excel(filename, index=False, header=True)
    except Exception as e:
        print(f"Error while saving to file: {e}")

def calculate_metrics(y_double_hat, labels):
    """Return (MAE, MSE, RMSE, R²) of predictions vs. labels as Python floats."""
    err = y_double_hat - labels
    mae = torch.mean(torch.abs(err)).item()
    mse_tensor = torch.mean(err ** 2)      # kept as a tensor for torch.sqrt below
    rmse = torch.sqrt(mse_tensor).item()
    ss_res = torch.sum(err ** 2)
    ss_tot = torch.sum((labels - torch.mean(labels)) ** 2)
    r2 = 1 - (ss_res / ss_tot).item()
    return mae, mse_tensor.item(), rmse, r2

def calculate_loss(y_double_hat, labels):
    """Mean-squared-error loss between predictions and targets."""
    squared_error = (y_double_hat - labels) ** 2
    return torch.mean(squared_error)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # 添加所有必要的参数
    parser.add_argument('--batchSize', type=int, default=20, help='input batch size')
    parser.add_argument('--sample_size', type=int, default=20, help='input batch size')
    parser.add_argument('--hidden_size', type=int, default=200, help='hidden state size')
    parser.add_argument('--hidden_dim', type=int, default=100, help='hidden state size')
    parser.add_argument('--epochs', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--AL_epochs', type=int, default=5, help='number of epochs to train for')
    parser.add_argument('--query_num', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--model_epochs', type=int, default=20, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--num_layers', type=int, default=6, help='layers nums')
    parser.add_argument('--num_heads', type=int, default=6, help='attention heads nums')
    parser.add_argument('--mlp_ratio', type=float, default=1, help='the ratio of hidden layers in the middle')
    parser.add_argument('--Kernel_size1', type=int, default=2, help='the first layer convolution kernel size')
    parser.add_argument('--Kernel_size2', type=int, default=2, help='the second layer convolution kernel size')
    parser.add_argument('--Stride1', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--Stride2', type=int, default=2, help='the second layer convolution stride size')
    parser.add_argument('--num_classes', type=int, default=77, help='the number of categories')
    parser.add_argument('--photo_size', type=int, default=128, help='the number of categories to the last label')
    parser.add_argument('--Linear_nums', type=int, default=3, help='the number of categories to the last label')
    parser.add_argument('--data_path', type=str, default='./data', help='the number of categories to the last label')
    parser.add_argument('--agent_nums', type=int, default=77, help='the number of categories to the last label')
    parser.add_argument('--gamma', type=float, default=0.99, help='the number of categories to the last label')
    parser.add_argument('--epsilon', type=float, default=0.8, help=
                        'the number of categories to the last label')
    parser.add_argument('--target_update_nums', type=int, default=4, help='the number of categories to the last label')
    parser.add_argument('--ReplayBuffer_capacity', type=int, default=80, help='the number of categories to the last label')
    parser.add_argument('--min_size', type=int, default=60, help='the number of categories to the last label')
    parser.add_argument('--path_len', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--D', type=float, default=1.7, help='the number of categories to the last label')
    parser.add_argument('--a', type=int, default=5, help='the number of categories to the last label')
    parser.add_argument('--model_name', type=str, default=str('Aes'), help='the number of categories to the last label')
    parser.add_argument('--mu', type=float, default=1, help='the number of categories to the last label')
    parser.add_argument('--embedding_dim', type=int, default=200, help='the number of categories to the last label')
    parser.add_argument('--msepara', type=int, default=1, help='the number of categories to the last label')
    parser.add_argument('--maepara', type=int, default=100, help='the number of categories to the last label')
    parser.add_argument('--expand_name', type=str, default=str('embedding_dim'), help='the number of categories to the last label')
    parser.add_argument('--is_adjust_parameter', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--CNN_USE_TRANSFORM', type=str, default=str('true'), help='the number of categories to the last label')
    parser.add_argument('--Submodel_USE_TRANSFORM', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Save_data', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Set_data', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--Save_dimension_chosen_nums', type=str, default=str('false'), help='the number of categories to the last label')
    parser.add_argument('--state_type', type=str, default=str('0'), help='0表示完全,1表示state中只有7维统计, 2表示state中只有d维GAT表征, 3表示state中只有77维')
    parser.add_argument('--reward_type', type=str, default=str('0'), help='0表示完全,1表示reward中只有MSE的+负向的, 2表示reward中只有MAE的+负向的, 3表示reward中只有相关性的+负向的')
    parser.add_argument('--singleCapacity', type=int, default=200, help='the number of categories to the last label')
    parser.add_argument('--FocalLoss_alpha', type=float, default=0.75, help='the number of categories to the last label')
    parser.add_argument('--FocalLoss_gamma', type=float, default=3, help='the number of categories to the last label')
    parser.add_argument('--OUTPUT_TENSORS', type=str, default='True', help='the number of categories to the last label')

    opt = parser.parse_args()

    # 调用 train_aesthetic_model 函数
    train_aesthetic_model(opt)