import random
import os
from tqdm import tqdm
import time
import json
import argparse
import data_process
import optimizers
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
import datetime
from collections import Counter
from scipy.stats import describe
import pandas as pd


def get_task(task_type):
    """Return the natural-language task instruction for a dataset name.

    Args:
        task_type: One of 'ethos', 'liar', 'ar_sarcasm', 'asap'.

    Returns:
        The instruction string used to prompt the model for that task.

    Raises:
        Exception: If ``task_type`` has no registered description
            (message style consistent with get_dataset / get_optimizer).
    """
    task_dict = {
        'ethos': 'Determine whether the following sentence is hate speech (Yes) or not (No).',
        'liar': 'Determine whether the statement is a lie (Yes) or not (No) based on the context and other information.',
        'ar_sarcasm': 'Determine whether the tweet is sarcastic (Yes) or not (No).',
        'asap': 'Evaluate the quality of student essays based on the given scoring rubric and assign an appropriate score.'
    }
    if task_type not in task_dict:
        raise Exception(f'Unsupported task: {task_type}')
    return task_dict[task_type]


def get_dataset(data_name, data_dir, max_threads, essay_set_id=None):
    """Instantiate the task/dataset wrapper for the given dataset name.

    Args:
        data_name: Dataset identifier ('ethos', 'jailbreak', 'liar',
            'ar_sarcasm', or 'asap').
        data_dir: Directory containing the raw data files.
        max_threads: Maximum worker threads for parallel processing.
        essay_set_id: ASAP essay set (1-8); required only for 'asap'.

    Returns:
        A task object from ``data_process``.

    Raises:
        ValueError: If 'asap' is requested without an ``essay_set_id``.
        Exception: If ``data_name`` is not a supported dataset.
    """
    if data_name == 'ethos':
        return data_process.EthosBinaryTask(data_dir, max_threads)
    if data_name == 'jailbreak':
        return data_process.JailbreakBinaryTask(data_dir, max_threads)
    # 'liar' and 'ar_sarcasm' share the same generic HF binary task wrapper.
    if data_name in ('liar', 'ar_sarcasm'):
        return data_process.DefaultHFBinaryTask(data_dir, max_threads)
    if data_name == 'asap':
        if essay_set_id is None:
            raise ValueError("essay_set_id must be provided for ASAP dataset")
        return data_process.ASAPTask(data_dir, essay_set_id, max_threads)
    raise Exception(f'Unsupported task: {data_name}')


def get_optimizer(optimizer_name, config, task, max_threads, model_name="chatgpt"):
    """Instantiate the prompt optimizer selected by name.

    Args:
        optimizer_name: 'protegi' or 'rubric'.
        config: Run configuration dict passed through to the optimizer.
        task: Natural-language task description string.
        max_threads: Maximum worker threads for parallel API calls.
        model_name: Backend model identifier (only used by 'rubric').

    Returns:
        An optimizer instance from ``optimizers``.

    Raises:
        Exception: If ``optimizer_name`` is not recognized.
    """
    if optimizer_name == 'rubric':
        return optimizers.RubricOptimizer(config, task, max_threads, model_name)
    if optimizer_name == 'protegi':
        return optimizers.ProTeGi(config, task, max_threads)
    raise Exception(f'Unsupported optimizer: {optimizer_name}')


def get_args():
    """Build and parse the command-line arguments for an optimization run.

    Returns:
        argparse.Namespace holding the full run configuration (dataset
        selection, model, optimization hyperparameters, output paths).
    """
    p = argparse.ArgumentParser()
    # --- data selection ---
    p.add_argument('--dataset', default='asap', choices=['ethos', 'liar', 'ar_sarcasm', 'asap'], help='Dataset to use')
    p.add_argument('--data_dir', default='data', help='Directory containing the data')
    # --- output ---
    p.add_argument('--out', default='asap_results_1.log', help='Output file for results')
    # --- model backend ---
    p.add_argument('--model', default='glm', choices=['chatgpt', 'glm'], help='Model to use for API calls')
    # --- ASAP-specific: which essay prompt set to score ---
    p.add_argument('--essay_set_id', default=1, type=int, choices=range(1, 9), help='Essay set ID for ASAP dataset (1-8)')
    # --- execution / API parameters ---
    p.add_argument('--max_threads', default=4, type=int, help='Maximum number of threads for parallel processing')
    p.add_argument('--temperature', default=0.3, type=float, help='Temperature for API calls')
    # --- optimization hyperparameters ---
    p.add_argument('--rounds', default=5, type=int, help='Number of optimization rounds')
    p.add_argument('--n_test_exs', default=10, type=int, help='Number of test examples to evaluate')
    p.add_argument('--minibatch_size', default=5, type=int, help='Minibatch size for training')
    p.add_argument('--reasons_per_error', default=1, type=int, help='Number of reasons to generate per error')
    p.add_argument('--suggestions_per_error_reason', default=1, type=int, help='Number of suggestions to generate per error reason')
    p.add_argument('--optimizer', default='rubric', choices=['protegi', 'rubric'], help='Optimizer to use')
    # --- plotting ---
    p.add_argument('--save_plots', action='store_true', help='Save performance plots')
    return p.parse_args()


def plot_metrics(qwk_scores, mae_scores, output_file):
    """Plot QWK and MAE per optimization round side by side and save to file.

    Args:
        qwk_scores: QWK value for each round (index 0 = initial rubric).
        mae_scores: MAE value for each round, aligned with ``qwk_scores``.
        output_file: Path the PNG figure is written to.
    """
    xs = list(range(len(qwk_scores)))
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(12, 5))

    # (axis, series, line color, title, y-label) for each panel.
    panels = [
        (left_ax, qwk_scores, 'blue', 'QWK Score per Round', 'QWK Score'),
        (right_ax, mae_scores, 'red', 'MAE per Round', 'Mean Absolute Error'),
    ]
    for ax, series, color, title, ylabel in panels:
        ax.plot(xs, series, 'o-', color=color)
        ax.set_title(title)
        ax.set_xlabel('Round')
        ax.set_ylabel(ylabel)
        ax.grid(True)

    plt.tight_layout()
    plt.savefig(output_file)
    plt.close()


# Log dataset statistics (split sizes and score distributions).
def log_dataset_stats(dataset, outf):
    """Write split sizes and per-split score distributions to the log.

    Args:
        dataset: Task object exposing get_train/val/test_examples(); each
            example is expected to be a dict with a 'score' key.
        outf: Writable text stream (the run log).
    """
    splits = (
        dataset.get_train_examples(),
        dataset.get_val_examples(),
        dataset.get_test_examples(),
    )
    train_exs, val_exs, test_exs = splits
    score_lists = [[ex['score'] for ex in exs] for exs in splits]

    outf.write("\n======== 数据集统计信息 ========\n")
    outf.write(f"训练集大小: {len(train_exs)}\n")
    outf.write(f"验证集大小: {len(val_exs)}\n")
    outf.write(f"测试集大小: {len(test_exs)}\n\n")

    outf.write("分数分布统计:\n")
    labels = ("训练集分数分布: ", "验证集分数分布: ", "测试集分数分布: ")
    for label, scores in zip(labels, score_lists):
        outf.write(label + str(Counter(scores)) + "\n")
    outf.write("\n")


# Detailed analysis of predicted scores against gold scores.
def log_prediction_analysis(true_scores, pred_scores, outf, title="预测分析"):
    """Write summary statistics of prediction errors to the log file.

    Logs mean/median/std/min/max of the absolute errors, the error
    distribution, and the fractions of predictions that match exactly or
    fall within 1 or 2 points of the gold score.

    Args:
        true_scores: Gold scores (non-empty sequence of numbers).
        pred_scores: Predicted scores aligned with ``true_scores``.
        outf: Writable text stream (the run log).
        title: Section heading used in the log.
    """
    # Absolute error per example; reused for every statistic below.
    # (The previous `describe(errors)` call was dead code and has been removed.)
    errors = [abs(t - p) for t, p in zip(true_scores, pred_scores)]

    outf.write(f"\n======== {title} ========\n")
    outf.write(f"平均误差: {np.mean(errors):.4f}\n")
    outf.write(f"误差中位数: {np.median(errors):.4f}\n")
    outf.write(f"误差标准差: {np.std(errors):.4f}\n")
    outf.write(f"误差最小值: {np.min(errors):.4f}\n")
    outf.write(f"误差最大值: {np.max(errors):.4f}\n")
    outf.write(f"误差分布: {Counter(errors)}\n\n")

    # Exact-match and within-k accuracy, derived from the same error list
    # instead of re-zipping the score sequences three times.
    n = len(true_scores)
    exact_match = sum(1 for e in errors if e == 0)
    within_one = sum(1 for e in errors if e <= 1)
    within_two = sum(1 for e in errors if e <= 2)

    outf.write(f"精确匹配率: {exact_match/n:.4f}\n")
    outf.write(f"误差≤1的比例: {within_one/n:.4f}\n")
    outf.write(f"误差≤2的比例: {within_two/n:.4f}\n\n")


# After the final test-set evaluation, export the results to an Excel spreadsheet.
def save_results_to_excel(essays, true_scores, pred_scores, output_file):
    """Save per-essay test results (model vs. human scores) to an Excel file.

    Args:
        essays: Essay texts, one per row.
        true_scores: Human (gold) scores, aligned with ``essays``.
        pred_scores: Model (APO) scores, aligned with ``essays``.
        output_file: Destination .xlsx path.
    """
    columns = {
        'essay': essays,
        'APO评分': pred_scores,
        '人工评分': true_scores,
    }
    frame = pd.DataFrame(columns)
    frame.to_excel(output_file, index=False)
    print(f"测试结果已保存至 {output_file}")


if __name__ == '__main__':
    args = get_args()
    config = vars(args)

    # Fetch the natural-language task description.
    # NOTE(review): the conditional is redundant — both branches evaluate to
    # args.dataset; get_task(args.dataset) would be equivalent.
    task = get_task('asap' if args.dataset == 'asap' else args.dataset)

    # Load the dataset wrapper for the chosen task.
    dataset = get_dataset(args.dataset, args.data_dir, args.max_threads, args.essay_set_id)

    # Build the prompt optimizer (ProTeGi or RubricOptimizer).
    optimizer = get_optimizer(args.optimizer, config, task, args.max_threads, args.model)

    # Fetch train / validation / test splits. ASAP examples additionally carry
    # a scoring rubric (marked variant for training only).
    if args.dataset == 'asap':
        train_exs = dataset.get_train_examples(include_rubric=True, use_marked_rubric=True)
        val_exs = dataset.get_val_examples(include_rubric=True, use_marked_rubric=False)
        test_exs = dataset.get_test_examples(include_rubric=True, use_marked_rubric=False)
    else:
        train_exs = dataset.get_train_examples()
        test_exs = dataset.get_test_examples()
        val_exs = test_exs[:len(test_exs)//2]  # For non-ASAP datasets, the first half of the test set doubles as validation. NOTE(review): val overlaps test — metrics on these examples are not independent.

    # Start from a fresh log file for this run.
    if os.path.exists(args.out):
        os.remove(args.out)

    print(config)

    with open(args.out, 'a', encoding='utf-8') as outf:
        outf.write(json.dumps(config) + '\n')
        # Record split sizes and score distributions at the top of the log.
        log_dataset_stats(dataset, outf)

    # Essay-scoring branch: iteratively refine the scoring rubric with RubricOptimizer.
    if args.dataset == 'asap' and args.optimizer == 'rubric':
        # Seed the search with the rubric attached to the first training example.
        original_rubric = train_exs[0]['scoring_rubric']
        print(f"\n初始评分标准:\n{original_rubric}\n")

        with open(args.out, 'a', encoding='utf-8') as outf:
            outf.write(f"\n初始评分标准:\n{original_rubric}\n")

        # Per-round metrics plus the best rubric found so far.
        qwk_scores = []
        mae_scores = []
        best_rubric = original_rubric
        best_qwk = -1

        # Baseline: evaluate the unmodified rubric on the validation slice.
        print("评估初始评分标准...")
        qwk, mae, _, true_scores, pred_scores = optimizer.evaluate(original_rubric, val_exs[:args.n_test_exs])
        qwk_scores.append(qwk)
        mae_scores.append(mae)

        print(f"初始QWK: {qwk:.4f}, MAE: {mae:.4f}")
        with open(args.out, 'a', encoding='utf-8') as outf:
            outf.write(f"初始QWK: {qwk:.4f}, MAE: {mae:.4f}\n")
            outf.write(f"真实分数: {true_scores}\n")
            outf.write(f"预测分数: {pred_scores}\n\n")
            log_prediction_analysis(true_scores, pred_scores, outf, "初始预测分析")

        # Iterative optimization: analyze errors -> suggest -> apply -> evaluate.
        for round_idx in range(args.rounds):
            print(f"\n======== 第 {round_idx+1}/{args.rounds} 轮优化 ========")
            start_time = time.time()
            start_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write(f"\n======== 第 {round_idx+1}/{args.rounds} 轮优化 ========\n")
                outf.write(f"开始时间: {start_datetime}\n")

            # Evaluate the current best rubric on a training minibatch.
            _, _, essays, true_scores, pred_scores = optimizer.evaluate(best_rubric, 
                                                                       train_exs[:args.minibatch_size])

            # Select the worst-predicted examples for analysis (at most 3).
            error_examples = optimizer.find_error_examples(essays, true_scores, pred_scores, 
                                                         n_samples=min(3, args.minibatch_size))

            # Ask the model why each error happened.
            error_analysis = optimizer.analyze_errors(best_rubric, error_examples)

            # Log the error analysis (truncated essay text, scores, reasons).
            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write("错误分析:\n")
                for analysis in error_analysis:
                    outf.write(f"作文: {analysis['example']['essay'][:100]}...\n")
                    outf.write(f"真实分数: {analysis['example']['true_score']}, 预测分数: {analysis['example']['pred_score']}\n")
                    outf.write("错误原因:\n")
                    for reason in analysis['reasons']:
                        outf.write(f"- {reason}\n")
                    outf.write("\n")

            # Generate candidate rubric improvements from the analysis.
            suggestions = optimizer.generate_improvements(best_rubric, error_analysis)

            # Log the improvement suggestions.
            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write("改进建议:\n")
                for suggestion in suggestions:
                    outf.write(f"- {suggestion}\n")
                outf.write("\n")

            # Apply the suggestions to produce modified rubric candidates.
            modified_rubrics = optimizer.apply_improvements(best_rubric, suggestions)

            if not modified_rubrics:
                print("没有生成有效的改进建议，结束优化")
                with open(args.out, 'a', encoding='utf-8') as outf:
                    outf.write("没有生成有效的改进建议，结束优化\n")
                break

            # Score every candidate against the incumbent on the validation slice.
            results = optimizer.evaluate_improvements(best_rubric, modified_rubrics, val_exs[:args.n_test_exs])

            # Take the top-ranked result. Presumably evaluate_improvements
            # returns results sorted best-first — TODO confirm its ordering.
            new_best = results[0]
            current_qwk = new_best['qwk']
            current_mae = new_best['mae']

            qwk_scores.append(current_qwk)
            mae_scores.append(current_mae)

            # Keep the candidate only if it strictly improves QWK.
            if current_qwk > best_qwk:
                best_rubric = new_best['rubric']
                best_qwk = current_qwk
                print(f"找到更好的评分标准! QWK: {current_qwk:.4f}, MAE: {current_mae:.4f}")
            else:
                print(f"没有找到更好的评分标准。当前QWK: {current_qwk:.4f}, MAE: {current_mae:.4f}")

            # (Disabled) Detailed per-candidate logging after evaluation.
            # with open(args.out, 'a', encoding='utf-8') as outf:
            #     outf.write("\n改进版本评估结果:\n")
            #     for i, result in enumerate(results):
            #         outf.write(f"版本 {i+1}{' (原始)' if result['is_original'] else ''}:\n")
            #         outf.write(f"QWK: {result['qwk']:.4f}, MAE: {result['mae']:.4f}\n")
            #         if i < 3:  # 只显示前3个版本的评分标准内容
            #             outf.write(f"评分标准:\n{result['rubric']}\n\n")
            #         else:
            #             outf.write("评分标准内容已省略...\n\n")

            # Log this round's results and timing.
            end_time = time.time()
            end_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            round_duration = end_time - start_time

            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write(f"本轮QWK: {current_qwk:.4f}, MAE: {current_mae:.4f}\n")
                outf.write(f"当前最佳QWK: {best_qwk:.4f}\n")
                outf.write(f"本轮运行时间: {round_duration:.2f}秒\n")
                outf.write(f"开始时间: {start_datetime}\n")
                outf.write(f"结束时间: {end_datetime}\n")
                outf.write(f"当前最佳评分标准:\n{best_rubric}\n\n")

        # Final evaluation of the best rubric on the held-out test slice.
        final_qwk, final_mae, essays, true_scores, pred_scores = optimizer.evaluate(best_rubric, test_exs[:args.n_test_exs])

        print("\n优化完成!")
        print(f"最终评分标准:\n{best_rubric}")
        print(f"测试集QWK: {final_qwk:.4f}, MAE: {final_mae:.4f}")

        # Percentage improvement over the initial rubric on the same test slice.
        initial_qwk, initial_mae, _, _, _ = optimizer.evaluate(original_rubric, test_exs[:args.n_test_exs])
        qwk_improvement = ((final_qwk - initial_qwk) / initial_qwk) * 100 if initial_qwk != 0 else float('inf')
        mae_improvement = ((initial_mae - final_mae) / initial_mae) * 100 if initial_mae != 0 else float('inf')

        with open(args.out, 'a', encoding='utf-8') as outf:
            outf.write("\n======== 最终结果 ========\n")
            outf.write(f"最终评分标准:\n{best_rubric}\n\n")
            outf.write(f"测试集QWK: {final_qwk:.4f}, MAE: {final_mae:.4f}\n")
            outf.write(f"初始测试集QWK: {initial_qwk:.4f}, MAE: {initial_mae:.4f}\n")
            outf.write(f"QWK改进: {qwk_improvement:.2f}%, MAE改进: {mae_improvement:.2f}%\n")
            outf.write(f"真实分数: {true_scores}\n")
            outf.write(f"预测分数: {pred_scores}\n\n")
            log_prediction_analysis(true_scores, pred_scores, outf, "最终预测分析")

        # Export per-essay results (model vs. human scores) to Excel.
        excel_file = f"asap_essay{args.essay_set_id}_results.xlsx"
        save_results_to_excel(essays, true_scores, pred_scores, excel_file)

        # Optionally save the QWK/MAE progression plots.
        if args.save_plots:
            plot_file = f"asap_essay{args.essay_set_id}_metrics.png"
            plot_metrics(qwk_scores, mae_scores, plot_file)
            print(f"性能指标图表已保存至 {plot_file}")

    # Other tasks: use the original ProTeGi-style optimizer loop.
    else:
        train_metrics, test_metrics = [], []
        random.seed(42)
        # Initialize the prompt "network" (ordered prompt steps).
        nets = optimizer.init_net()

        # NOTE(review): `round` shadows the builtin of the same name inside
        # this loop body; rename to round_idx if the builtin is ever needed.
        for round in tqdm(range(config['rounds'] + 1)):
            print(f'\n现在的网络结构（步骤）：\n{nets}')

            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write(f"\n======== ROUND {round} ========\n")
                outf.write(f'现在的网络结构（步骤）：\n{nets}\n')

            start = time.time()

            # Evaluate on the test set and record F1.
            tf1, ttexts, tlabels, tpreds = optimizer.evaluate(nets, test_exs, n=args.n_test_exs)
            test_metrics.append(tf1)
            print(f'测试集f1指标：{test_metrics}\n')
            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write(f'测试集f1指标：{test_metrics}\n')

            # Evaluate on a random training minibatch and record F1.
            minibatch = random.sample(train_exs, k=min(args.minibatch_size, len(train_exs)))
            f1, texts, labels, preds = optimizer.evaluate(nets, minibatch)
            train_metrics.append(f1)
            print(f'训练集f1指标：{train_metrics}\n')

            # "Loss": textual error reasons for the wrong predictions.
            loss = optimizer.cal_loss(dataset, nets, texts, labels, preds)
            print(f'\n损失（错误原因）：{loss}')
            # "Gradients": improvement suggestions derived from the loss.
            gradients = optimizer.get_gradients(nets, loss)
            print(f'\n梯度（建议）：{gradients}')
            # "Learning rates": per-gradient accept (1) / reject decision.
            learning_rates = optimizer.cal_lr(nets, gradients)
            print(f'\n梯度的学习率：{learning_rates}')
            # Apply only the accepted gradients to update the prompt network.
            learn_gradients = []
            for gradient, lr in learning_rates.items():
                if lr == 1:
                    learn_gradients.append(gradient)
            if len(learn_gradients) != 0:
                learn_gradients = '\n'.join([f'{i+1}.{g}' for i, g in enumerate(learn_gradients)])
                nets = optimizer.update_net(nets, learn_gradients)

            with open(args.out, 'a', encoding='utf-8') as outf:
                outf.write(f'训练集f1指标：{train_metrics}\n')
                outf.write(f'损失（错误原因）：{loss}\n')
                outf.write(f'梯度（建议）：{gradients}\n')
                outf.write(f'梯度的学习率：{learning_rates}\n')
                outf.write(f'运行时间：{time.time() - start}\n')

        # Final test-set evaluation after the last update round.
        tf1, ttexts, tlabels, tpreds = optimizer.evaluate(nets, test_exs, n=args.n_test_exs)
        test_metrics.append(tf1)
        print(f'测试集f1指标：{test_metrics}\n')

        # Final training-minibatch evaluation.
        minibatch = random.sample(train_exs, k=min(args.minibatch_size, len(train_exs)))
        f1, texts, labels, preds = optimizer.evaluate(nets, minibatch)
        train_metrics.append(f1)
        print(f'训练集f1指标：{train_metrics}\n')
        with open(args.out, 'a', encoding='utf-8') as outf:
            outf.write(f'测试集f1指标：{test_metrics}\n')
            outf.write(f'训练集f1指标：{train_metrics}\n')

    print("DONE!")
