import torch
from PIL import Image
from transformers import AutoModelForCausalLM
import pandas as pd
import os
import gc
import time

# Load the Ovis1.6-Gemma2-9B multimodal model from a local checkpoint in bf16
# and move it to the GPU.
# NOTE(review): trust_remote_code=True executes Python shipped with the model
# repo — required for Ovis, but the local path must be trusted.
model = AutoModelForCausalLM.from_pretrained("/mnt/6t/lyh/1/Ovis1.6-Gemma2-9B",
                                             torch_dtype=torch.bfloat16,
                                             multimodal_max_length=8192,
                                             trust_remote_code=True).cuda()
text_tokenizer = model.get_text_tokenizer()
visual_tokenizer = model.get_visual_tokenizer()

# M2E2 dataset: a TSV of text samples plus a directory of matching images.
# Columns used below: '#1 ImageID' (image filename), '#2 String' (text),
# '#3 String' (gold event label) — presumably; confirm against the TSV header.
data_path = "/mnt/6t/lyh/1/M2E2/M2E2/text/mee.tsv"
image_dir = "/mnt/6t/lyh/1/M2E2/M2E2/image/image/image"
df = pd.read_csv(data_path, sep='\t')

# Closed set of event types the model is allowed to predict;
# 'none' means "no event detected".
event_types = ['Justice:Arrest-Jail', 'Conflict:Attack', 'Movement:Transport', 
               'Contact:Phone-Write', 'Life:Die', 'Conflict:Demonstrate', 
               'Transaction:Transfer-Money', 'Contact:Meet', 'none']

# Chinese instruction prompt: choose exactly one of the event types above
# from the text + image, output 'none' if undecidable, and output only the
# chosen option with no extra symbols.
prompt_template = "请仔细阅读以下文本和观察图像内容。请根据文本数据和图像数据辅助进行事件抽取，有以下事件抽取选项：'Justice:Arrest-Jail','Conflict:Attack','Movement:Transport','Contact:Phone-Write','Life:Die','Conflict:Demonstrate','Transaction:Transfer-Money','Contact:Meet','none'。注意，不要更改或新增其他事件选项，只允许在这几种事件中选择。如果根据文本和图片无法准确判断事件类型，请输出'none'。请在输出时仔细分析文本和图片数据。输出时直接输出选项，不需要添加其他符号。"

# Micro evaluation counters: true positives, false positives, false negatives.
TP = 0
FP = 0
FN = 0

# Number of samples processed per batch.
batch_size = 1  # adjust to available GPU memory

def clear_gpu_memory():
    """Release cached GPU memory between batches.

    Runs the Python garbage collector *first* so unreferenced tensors are
    actually destroyed, then asks the CUDA caching allocator to return
    unused blocks to the driver. The original order (``empty_cache`` before
    ``gc.collect``) could leave memory held by not-yet-collected tensors
    still cached. ``empty_cache`` is a safe no-op when CUDA is not
    initialized.
    """
    gc.collect()
    torch.cuda.empty_cache()

def save_checkpoint(results, checkpoint_file="event_extraction_checkpoint.csv"):
    """Persist per-sample results to a CSV checkpoint atomically.

    Writes to a temporary file next to the target and then ``os.replace``s
    it into place, so a crash or Ctrl-C during the write cannot leave a
    half-written (corrupt) checkpoint — the exact situation checkpoints
    exist to survive.

    Args:
        results: list of per-sample result dicts (one row each).
        checkpoint_file: destination CSV path.
    """
    tmp_path = checkpoint_file + ".tmp"
    pd.DataFrame(results).to_csv(tmp_path, index=False)
    os.replace(tmp_path, checkpoint_file)  # atomic rename on POSIX
    print(f"保存检查点到 {checkpoint_file}")

# Resume from a previous checkpoint, if one exists.
checkpoint_file = "event_extraction_checkpoint.csv"
processed_ids = set()
results = []

if os.path.exists(checkpoint_file):
    checkpoint_df = pd.read_csv(checkpoint_file)
    results = checkpoint_df.to_dict('records')
    processed_ids = set(checkpoint_df['image_id'].tolist())

    # Rebuild the confusion counters from the saved per-sample tags.
    # result_type may carry several comma-separated tags (e.g. "FP,FN"), so
    # every tag is counted independently. Empty CSV cells come back from
    # read_csv as NaN (a float), so coerce to str first — the original
    # `'FN' in result['result_type']` raised TypeError on such rows.
    for result in results:
        rt = result.get('result_type')
        if not isinstance(rt, str):
            rt = ''
        if 'TP' in rt:
            TP += 1
        if 'FP' in rt:
            FP += 1
        if 'FN' in rt:
            FN += 1

    print(f"从检查点恢复，已处理 {len(processed_ids)} 个样本")

# Main loop: iterate over the dataset in batches, run the model on each
# (text, image) pair, score the prediction, and checkpoint progress.
try:
    # Number of batches (ceiling division).
    total_batches = (len(df) + batch_size - 1) // batch_size

    for batch_idx in range(total_batches):
        start_idx = batch_idx * batch_size
        end_idx = min((batch_idx + 1) * batch_size, len(df))

        batch_df = df.iloc[start_idx:end_idx]

        # Process each sample of the current batch.
        for idx, row in batch_df.iterrows():
            try:
                # Image id, input text and gold label for this sample.
                image_id = row['#1 ImageID']

                # Skip samples already handled in a previous (checkpointed) run.
                if image_id in processed_ids:
                    print(f"跳过已处理的样本: {image_id}")
                    continue

                text = row['#2 String']
                ground_truth = row['#3 String']

                image_path = os.path.join(image_dir, image_id)

                # Skip samples whose image file is missing on disk.
                if not os.path.exists(image_path):
                    print(f"图片不存在: {image_path}")
                    continue

                # Load the image; the context manager closes the underlying
                # file handle so long runs do not leak file descriptors.
                with Image.open(image_path) as img:
                    image = img.convert('RGB')

                # Build the multimodal query: image placeholder, instruction
                # prompt, then the sample text.
                query = f'<image>\n{prompt_template}\n\n文本内容：{text}'

                # Preprocess inputs for the model.
                # FIX: this preprocessing/generation section had lost its
                # indentation in the original file (it sat at column 0 inside
                # the loop), making the whole script a SyntaxError; it is
                # restored into the per-sample try block here.
                prompt, input_ids, pixel_values = model.preprocess_inputs(query, [image])
                attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)
                input_ids = input_ids.unsqueeze(0).to(device=model.device)
                attention_mask = attention_mask.unsqueeze(0).to(device=model.device)
                pixel_values = [pixel_values.to(dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)]

                # Greedy decoding: do_sample=False with every sampling knob
                # explicitly disabled, so runs are deterministic.
                with torch.inference_mode():
                    gen_kwargs = dict(
                        max_new_tokens=1024,
                        do_sample=False,
                        top_p=None,
                        top_k=None,
                        temperature=None,
                        repetition_penalty=None,
                        eos_token_id=model.generation_config.eos_token_id,
                        pad_token_id=text_tokenizer.pad_token_id,
                        use_cache=True
                    )
                    output_ids = model.generate(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, **gen_kwargs)[0]
                    output = text_tokenizer.decode(output_ids, skip_special_tokens=True)

                # Keep only the model's answer if the prompt was echoed back.
                prediction = output.split(prompt_template)[-1].strip()

                # Map the raw answer onto one of the allowed event types;
                # anything unrecognized falls back to 'none'.
                prediction_clean = None
                for event_type in event_types:
                    if event_type.lower() in prediction.lower():
                        prediction_clean = event_type
                        break

                if prediction_clean is None:
                    prediction_clean = 'none'

                # Score the prediction. Tags accumulate instead of being
                # overwritten: a wrong non-'none' prediction against a
                # non-'none' gold label is both FP and FN, and the original
                # code saved only "FN", losing the FP on checkpoint restore.
                # A correct 'none' is tagged "TN" instead of an empty string
                # (empty CSV cells read back as NaN and broke restoration).
                tags = []
                if prediction_clean == ground_truth:
                    if prediction_clean != 'none':
                        TP += 1
                        tags.append("TP")
                    else:
                        tags.append("TN")
                elif prediction_clean != 'none':
                    FP += 1
                    tags.append("FP")

                if ground_truth != 'none' and prediction_clean != ground_truth:
                    FN += 1
                    tags.append("FN")

                result_type = ",".join(tags)

                # Running micro precision / recall / F1.
                precision = TP / (TP + FP) if (TP + FP) > 0 else 0
                recall = TP / (TP + FN) if (TP + FN) > 0 else 0
                f1_score = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

                # Per-sample report.
                print(f"图片: {image_id}")
                print(f"文本: {text}")
                print(f"模型原始输出: {prediction}")
                print(f"预测值: {prediction_clean}")
                print(f"真实值: {ground_truth}")
                print(f"结果类型: {result_type}")
                print(f"当前指标 - TP: {TP}, FP: {FP}, FN: {FN}")
                print(f"Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1_score:.4f}")
                print("-" * 80)

                # Record the sample for checkpointing.
                result_dict = {
                    'image_id': image_id,
                    'text': text,
                    'prediction': prediction_clean,
                    'ground_truth': ground_truth,
                    'result_type': result_type,
                    'TP': TP,
                    'FP': FP,
                    'FN': FN,
                    'precision': precision,
                    'recall': recall,
                    'f1_score': f1_score
                }
                results.append(result_dict)
                processed_ids.add(image_id)

                # Drop per-sample tensors promptly to keep GPU memory flat.
                del input_ids, attention_mask, pixel_values, output_ids

            except Exception as e:
                # Best effort: log the failure and move on to the next sample.
                print(f"处理样本 {idx} 时出错: {str(e)}")
                continue

        # Release cached GPU memory after every batch.
        clear_gpu_memory()

        # Persist a checkpoint every 5 batches.
        if batch_idx % 5 == 0 and batch_idx > 0:
            save_checkpoint(results, checkpoint_file)

        # Progress report.
        print(f"已完成 {batch_idx+1}/{total_batches} 批次，处理了 {len(processed_ids)}/{len(df)} 个样本")

        # Pause every 10 batches to let the GPU cool down.
        if batch_idx % 10 == 9:
            print("暂停10秒让GPU冷却...")
            time.sleep(10)

except KeyboardInterrupt:
    # Ctrl-C: persist progress before leaving the loop.
    print("用户中断处理，保存当前进度...")
    save_checkpoint(results, checkpoint_file)
    
# Final micro precision / recall / F1 over everything processed this run.
prec_denom = TP + FP
rec_denom = TP + FN
final_precision = TP / prec_denom if prec_denom else 0
final_recall = TP / rec_denom if rec_denom else 0
pr_sum = final_precision + final_recall
final_f1 = (2 * final_precision * final_recall) / pr_sum if pr_sum else 0

print("\n最终评估结果:")
print(f"总样本数: {len(df)}")
print(f"处理样本数: {len(processed_ids)}")
print(f"TP: {TP}, FP: {FP}, FN: {FN}")
print(f"Precision: {final_precision:.4f}")
print(f"Recall: {final_recall:.4f}")
print(f"F1 Score: {final_f1:.4f}")

# Persist the complete result set under its own filename.
save_checkpoint(results, "event_extraction_final_results.csv")

# One last GPU memory cleanup before exit.
clear_gpu_memory()
