import json
import os
import torch
import argparse
import datetime
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm
import numpy as np

# Load the fine-tuned Qwen3 checkpoint together with its tokenizer.
def load_qwen3_model():
    """Load the Qwen3 model and tokenizer from the local checkpoint.

    Returns:
        (model, tokenizer): the causal LM (dtype kept as stored, placed
        across available devices by ``device_map="auto"``) and its tokenizer.
    """
    # Alternative base checkpoint: "/mnt/6t/lyh/1/Qwen3-8B"
    checkpoint = "/mnt/6t/LLaMA-Factory/Qwen3-demo1"

    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype="auto",   # keep the checkpoint's native dtype
        device_map="auto",    # auto-place/shard across available devices
    )
    return lm, tok

# Read the ACE2005 test split from a JSON file.
def load_ace_test_data(file_path):
    """Return the parsed JSON content of ``file_path`` (UTF-8 encoded)."""
    with open(file_path, 'r', encoding='utf-8') as fp:
        return json.load(fp)

# Classify one sentence into a single ACE2005 event type with Qwen3.
def extract_event_with_qwen3(model, tokenizer, text, image_path=None):
    """Ask the Qwen3 chat model to label ``text`` with one ACE2005 event type.

    Args:
        model: causal LM loaded via transformers (already placed on device).
        tokenizer: matching tokenizer; must support ``apply_chat_template``.
        text: sentence to classify.
        image_path: unused; kept so existing callers remain compatible.

    Returns:
        One of the 21 allowed event-type strings, or "None" when the model's
        reply contains no recognizable label.
    """
    # Chinese instruction prompt: lists the 21 allowed labels and asks the
    # model to answer with exactly one of them, with no extra symbols.
    prompt = """你是一名专业的事件抽取专家，请仔细阅读以下文本内容。请根据文本数据辅助进行事件抽取，有以下事件抽取选项：'None', 'Conflict:Attack', 'Movement:Transport', 'Life:Die', 'Contact:Meet', 'Personnel:End-Position', 'Personnel:Elect', 'Life:Injure', 'Transaction:Transfer-Money', 'Contact:Phone-Write','Justice:Trial-Hearing', 'Justice:Charge-Indict', 'Transaction:Transfer-Ownership', 'Personnel:Start-Position','Justice:Sentence', 'Justice:Arrest-Jail', 'Life:Marry', 'Conflict:Demonstrate', 'Justice:Convict', 'Justice:Sue', 'Life:Be-Born'。注意，不要更改或新增其他事件选项。请在输出时仔细分析文本数据。输出时直接输出选项，不需要添加其他符号。

文本: {text}"""

    messages = [
        {"role": "user", "content": prompt.format(text=text)}
    ]

    # Render the chat template; thinking mode disabled to keep replies short.
    input_text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )

    model_inputs = tokenizer([input_text], return_tensors="pt").to(model.device)

    # Low temperature keeps sampling close to deterministic for labelling.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
        temperature=0.2,
        do_sample=True
    )

    # Keep only the newly generated token ids (strip the prompt tokens).
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

    # 151668 is the id of the </think> token in the Qwen3 vocabulary. If the
    # model emitted a thinking block anyway, keep only the text after the
    # LAST </think>; otherwise use the whole reply.
    try:
        index = len(output_ids) - output_ids[::-1].index(151668)
    except ValueError:
        index = 0

    content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

    # Allowed label set (same as listed in the prompt above).
    event_types = ['None', 'Conflict:Attack', 'Movement:Transport', 'Life:Die',
                  'Contact:Meet', 'Personnel:End-Position', 'Personnel:Elect',
                  'Life:Injure', 'Transaction:Transfer-Money', 'Contact:Phone-Write',
                  'Justice:Trial-Hearing', 'Justice:Charge-Indict',
                  'Transaction:Transfer-Ownership', 'Personnel:Start-Position',
                  'Justice:Sentence', 'Justice:Arrest-Jail', 'Life:Marry',
                  'Conflict:Demonstrate', 'Justice:Convict', 'Justice:Sue', 'Life:Be-Born']

    # BUG FIX: match specific labels first. The original checked 'None'
    # before the real labels, so any reply merely containing the word "none"
    # (e.g. "None of the others fit: Life:Die") was mapped to "None" even
    # when a concrete label was present. "None" is now the fallback only.
    lowered = content.lower()
    for event_type in event_types:
        if event_type == "None":
            continue  # fallback label, not a pattern to search for
        if event_type.lower() in lowered:
            return event_type

    # No specific label found in the reply.
    return "None"

# Accuracy: fraction of positions where prediction equals the gold label.
def calculate_accuracy(gold_labels, predictions):
    """Return exact-match accuracy between two equal-length label lists.

    Raises:
        ValueError: when the two lists differ in length.
    """
    if len(gold_labels) != len(predictions):
        raise ValueError("gold_labels和predictions的长度必须相同")
    if not gold_labels:
        return 0  # empty input: defined as 0 rather than a division error
    matches = sum(g == p for g, p in zip(gold_labels, predictions))
    return matches / len(gold_labels)

# Build a per-label precision/recall/F1 report plus macro and weighted averages.
def calculate_classification_report(gold_labels, predictions):
    """Compute per-label precision / recall / F1 / support, overall accuracy,
    and macro / support-weighted averages (sklearn-style report layout).

    Returns:
        dict with keys "labels" (label -> metric dict), "accuracy",
        "macro_avg" and "weighted_avg".
    """
    pairs = list(zip(gold_labels, predictions))
    unique_labels = sorted(set(gold_labels + predictions))

    report = {
        "labels": {},
        "accuracy": calculate_accuracy(gold_labels, predictions),
        "macro_avg": {"precision": 0, "recall": 0, "f1-score": 0, "support": 0},
        "weighted_avg": {"precision": 0, "recall": 0, "f1-score": 0, "support": len(gold_labels)},
    }

    # One pass over the pairs per label to collect the confusion counts.
    for label in unique_labels:
        tp = fp = fn = support = 0
        for gold, pred in pairs:
            if gold == label:
                support += 1
                if pred == label:
                    tp += 1
                else:
                    fn += 1
            elif pred == label:
                fp += 1

        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0

        report["labels"][label] = {
            "precision": precision,
            "recall": recall,
            "f1-score": f1,
            "support": support,
        }

    metrics = list(report["labels"].values())

    # Macro average: unweighted mean over labels; support is the grand total.
    if unique_labels:
        n_labels = len(unique_labels)
        report["macro_avg"]["precision"] = sum(m["precision"] for m in metrics) / n_labels
        report["macro_avg"]["recall"] = sum(m["recall"] for m in metrics) / n_labels
        report["macro_avg"]["f1-score"] = sum(m["f1-score"] for m in metrics) / n_labels
        report["macro_avg"]["support"] = sum(m["support"] for m in metrics)

    # Weighted average: mean weighted by each label's gold support.
    total_support = sum(m["support"] for m in metrics)
    if total_support > 0:
        report["weighted_avg"]["precision"] = sum(m["precision"] * m["support"] for m in metrics) / total_support
        report["weighted_avg"]["recall"] = sum(m["recall"] * m["support"] for m in metrics) / total_support
        report["weighted_avg"]["f1-score"] = sum(m["f1-score"] * m["support"] for m in metrics) / total_support

    return report

# Bundle accuracy and the detailed classification report into one dict.
def evaluate_event_extraction(predictions, gold_labels):
    """Evaluate predictions against gold labels.

    Returns:
        dict with "accuracy", "f1_score" (the support-weighted F1) and the
        full "classification_report".
    """
    report = calculate_classification_report(gold_labels, predictions)
    return {
        "accuracy": calculate_accuracy(gold_labels, predictions),
        "f1_score": report["weighted_avg"]["f1-score"],
        "classification_report": report,
    }

# Create the log directory and build a timestamped per-run log-file path.
def setup_logging(args):
    """Prepare logging for one evaluation run.

    Args:
        args: parsed CLI namespace; only ``args.max_samples`` is read, and a
            positive value is embedded in the log-file name so capped runs
            are distinguishable.

    Returns:
        (log_dir, log_file): the directory (created if missing) and the full
        path of this run's log file.
    """
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    # Fixed output location. Plain string literal — the original used an
    # f-string with no placeholders (ruff F541).
    log_dir = "/mnt/6t/lyh/1/logs"
    os.makedirs(log_dir, exist_ok=True)

    max_samples_str = f"_{args.max_samples}" if args.max_samples > 0 else ""
    log_file = os.path.join(log_dir, f"ace_qwen3_event_extraction_{timestamp}{max_samples_str}.log")

    return log_dir, log_file

# Append one per-sample record to the run's log file.
def log_result(log_file, text, gold_label, prediction, TP, FP, FN, precision=None, recall=None, f1=None):
    """Append the sample text, gold/predicted labels, running TP/FP/FN, and
    (when all three are given) precision/recall/F1 to ``log_file``.
    """
    record = [
        f"文本: {text}\n",
        f"标注事件: {gold_label}\n",
        f"预测事件: {prediction}\n",
        f"当前TP: {TP}, FP: {FP}, FN: {FN}\n",
    ]
    # Metric line only when the caller supplies the full triple.
    if precision is not None and recall is not None and f1 is not None:
        record.append(f"精确度: {precision:.4f}, 召回率: {recall:.4f}, F1值: {f1:.4f}\n")
    record.append("-" * 80 + "\n")

    with open(log_file, 'a', encoding='utf-8') as fh:
        fh.writelines(record)

# Render the classification-report dict as an aligned plain-text table.
def format_classification_report(report):
    """Return a sklearn-style text table for the report produced by
    ``calculate_classification_report``.
    """
    sep = "-" * 80
    header = f"{'Label':<20} {'Precision':<10} {'Recall':<10} {'F1-Score':<10} {'Support':<10}"
    rows = ["Classification Report:", sep, header, sep]

    # One row per label, in the dict's iteration order.
    for label, m in report["labels"].items():
        rows.append(f"{label:<20} {m['precision']:<10.4f} {m['recall']:<10.4f} {m['f1-score']:<10.4f} {m['support']:<10d}")

    rows.append(sep)
    # Accuracy has no precision/recall columns, only the score and support.
    rows.append(f"{'Accuracy':<20} {'':<10} {'':<10} {report['accuracy']:<10.4f} {report['weighted_avg']['support']:<10d}")
    for title, key in (("Macro Avg", "macro_avg"), ("Weighted Avg", "weighted_avg")):
        m = report[key]
        rows.append(f"{title:<20} {m['precision']:<10.4f} {m['recall']:<10.4f} {m['f1-score']:<10.4f} {m['support']:<10d}")

    return "\n".join(rows)

# Main entry point: run Qwen3 over the ACE2005 test split and report metrics.
def main(args):
    """Run event-extraction inference over the ACE2005 test set.

    For each sample: predict an event type, compare with the gold label,
    update running TP/FP/FN counters, and append a record to the log file.
    Finally print accuracy, weighted F1, a full classification report, and
    the TP/FP/FN-based precision / recall / F1.

    Args:
        args: parsed CLI namespace; ``args.max_samples`` (when > 0) caps the
            number of test samples processed.
    """
    # Per-run log file (directory is created if missing)
    log_dir, log_file = setup_logging(args)
    
    # Load the model and tokenizer
    print("Loading Qwen3 model...")
    model, tokenizer = load_qwen3_model()
    
    # Load the test data from a hard-coded path
    test_data_path = "/mnt/6t/lyh/ace2005-preprocessing/Ch_preprocess/output/test.json"
    print(f"Loading test data from {test_data_path}...")
    test_data = load_ace_test_data(test_data_path)
    
    # Optionally cap the number of samples for quick runs
    if args.max_samples > 0:
        test_data = test_data[:args.max_samples]
        print(f"Limiting to {args.max_samples} samples for testing")
    
    # Accumulators for predictions, gold labels, and the raw sentences
    predictions = []
    gold_labels = []
    texts = []
    
    # Running counters for the TP/FP/FN-based metrics.
    # NOTE(review): the intended scheme (per the original comments) excludes
    # "None" from TP, but the code below counts predicted == gold as TP even
    # when both are "None" — confirm whether None/None pairs should count.
    TP = 0  # true positive: prediction equals gold (intended: and not "None")
    FP = 0  # false positive: prediction is not "None" and differs from gold
    FN = 0  # false negative: prediction is "None" but gold is not
    
    # Run inference sample by sample
    print("Starting inference...")
    for i, sample in enumerate(tqdm(test_data)):
        # Sentence text
        text = sample["sentence"]
        
        # Gold event type; when a sentence has several event mentions,
        # only the first one is used
        if sample["golden-event-mentions"]:
            gold_event_type = sample["golden-event-mentions"][0]["event_type"]
        else:
            gold_event_type = "None"
        
        # Model prediction for this sentence
        predicted_event_type = extract_event_with_qwen3(model, tokenizer, text)
        
        # Record prediction, gold label, and text
        predictions.append(predicted_event_type)
        gold_labels.append(gold_event_type)
        texts.append(text)
        
        # Update the TP/FP/FN counters (see NOTE above on None/None pairs)
        if predicted_event_type == gold_event_type:
            # 1. prediction equals gold -> TP
            TP += 1
        elif predicted_event_type != "None" and predicted_event_type != gold_event_type:
            # 2. prediction is not "None" and differs from gold -> FP
            FP += 1
        elif predicted_event_type == "None" and gold_event_type != "None":
            # 3. prediction is "None" but gold is not -> FN
            FN += 1
        
        # Running precision / recall / F1 over the samples seen so far
        precision = TP / (TP + FP) if TP + FP > 0 else 0
        recall = TP / (TP + FN) if TP + FN > 0 else 0
        f1_value = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0
        
        # Append this sample's record to the log file
        log_result(log_file, text, gold_event_type, predicted_event_type, TP, FP, FN)
        
        print(f"Sample {i+1}:")
        print(f"Text: {text}")
        print(f"Predicted: {predicted_event_type}")
        print(f"Gold: {gold_event_type}")
        print(f"Current TP: {TP}, FP: {FP}, FN: {FN}")
        print(f"Current Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1_value:.4f}")
        print("-" * 50)
        
        # Intermediate JSON results are intentionally not saved
    
    # Final precision / recall / F1 from the accumulated counters
    precision = TP / (TP + FP) if TP + FP > 0 else 0
    recall = TP / (TP + FN) if TP + FN > 0 else 0
    f1_value = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0
    
    # Write the final statistics to the log file
    with open(log_file, 'a', encoding='utf-8') as f:
        f.write("\n" + "="*80 + "\n")
        f.write("最终统计结果\n")
        f.write(f"TP (True Positives): {TP}\n")
        f.write(f"FP (False Positives): {FP}\n")
        f.write(f"FN (False Negatives): {FN}\n")
        f.write(f"精确度 (Precision): {precision:.4f}\n")
        f.write(f"召回率 (Recall): {recall:.4f}\n")
        f.write(f"F1值 (F1-Score): {f1_value:.4f}\n")
    
    # Label-level evaluation (accuracy + classification report)
    print("Evaluating results...")
    evaluation_results = evaluate_event_extraction(predictions, gold_labels)
    
    # Print the evaluation summary
    print(f"Accuracy: {evaluation_results['accuracy']:.4f}")
    print(f"Weighted F1 Score: {evaluation_results['f1_score']:.4f}")
    print(format_classification_report(evaluation_results['classification_report']))
    
    # Print the TP/FP/FN counts and the derived precision / recall / F1
    print("\n--- 结果统计 ---")
    print(f"TP (True Positives): {TP}")
    print(f"FP (False Positives): {FP}")
    print(f"FN (False Negatives): {FN}")
    print(f"精确度 (Precision): {precision:.4f}")
    print(f"召回率 (Recall): {recall:.4f}")
    print(f"F1值 (F1-Score): {f1_value:.4f}")
    
    # Final JSON results are intentionally not saved
    
    print(f"Logs saved to {log_file}")

if __name__ == "__main__":
    # CLI: --max_samples caps how many test samples are processed (-1 = all)
    parser = argparse.ArgumentParser(description="ACE2005事件抽取使用Qwen3模型")
    parser.add_argument("--max_samples", type=int, default=-1, help="最大处理样本数量，-1表示处理所有样本")
    args = parser.parse_args()
    
    main(args)