import pandas as pd
import torch
import numpy as np
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
from PIL import Image
import logging
import os
import datetime
from pathlib import Path
import base64  # 添加base64模块导入

# Ensure the logs directory exists before attaching a file handler.
logs_dir = Path('logs')
logs_dir.mkdir(exist_ok=True)

# Timestamped log file so repeated runs never overwrite each other.
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
log_file = logs_dir / f'llava_re_next_EE_{timestamp}.log'

# File-only logging (console output goes through explicit print calls below).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler(log_file)],
)
logger = logging.getLogger(__name__)

logger.info("开始运行关系抽取任务")
print("开始运行关系抽取任务")

# --- Dataset loading ---
data_path = r'/mnt/6t/lyh/data/M2E2/M2E2/text/mee.tsv'
logger.info(f"数据路径: {data_path}")
print(f"数据路径: {data_path}")
data = pd.read_csv(data_path, sep='\t')
logger.info(f"读取数据集完成，共有{len(data)}条记录")
print(f"读取数据集完成，共有{len(data)}条记录")

# --- Model loading ---
model_path = "/mnt/6t/lyh/1/llama3-llava-next-8b-hf"
logger.info(f"模型路径: {model_path}")
print(f"模型路径: {model_path}")

# Model-only loading options. FIX: torch_dtype / device_map / low_cpu_mem_usage
# are kwargs for the *model*; forwarding them to the processor's from_pretrained
# can raise or warn in transformers, so the processor now receives only
# local_files_only.
load_config = {
    "low_cpu_mem_usage": True,
    "torch_dtype": torch.float16,
    "device_map": "auto",
    "local_files_only": True
}

logger.info("开始加载模型...")
print("开始加载模型...")
processor = LlavaNextProcessor.from_pretrained(model_path, local_files_only=True)
model = LlavaNextForConditionalGeneration.from_pretrained(model_path, **load_config)
logger.info("模型加载完成")
print("模型加载完成")

# Root directory that holds the M2E2 images referenced by the TSV rows.
image_base_path = '/mnt/6t/lyh/data/M2E2/M2E2/image/image/image'
logger.info(f"图像路径: {image_base_path}")
print(f"图像路径: {image_base_path}")

def load_image(image_path):
    """Open the file at *image_path* and return it as an RGB PIL image."""
    return Image.open(image_path).convert("RGB")

def convert_to_nan(value):
    """Map the literal string 'nan' (any letter case) to np.nan.

    Any other value — including non-strings — is returned unchanged.
    """
    is_nan_string = isinstance(value, str) and value.lower() == 'nan'
    return np.nan if is_nan_string else value

def clean_output_text(text):
    """Normalize model output for label comparison.

    Removes literal escape sequences, surrounding quotes and whitespace,
    collapses inner whitespace runs to single spaces, and lowercases.

    FIX: whitespace is stripped *before* the surrounding quotes, so outputs
    like ' "Conflict:Attack" ' lose their quotes too. The original stripped
    quotes first, which left them in place whenever the quotes were padded
    with spaces, making the label never match the allowed set.
    """
    # Remove literal backslash escape sequences the model sometimes emits.
    text = text.replace('\\n', '').replace('\\t', '').replace('\\r', '')
    # Strip whitespace first so quotes at the trimmed ends are removable,
    # then strip quotes, then any whitespace that was inside the quotes.
    text = text.strip().strip('"\'').strip()
    # Collapse whitespace runs and lowercase for case-insensitive matching.
    text = ' '.join(text.split())
    return text.lower()

def local_ner_predict(text, image_path):
    """Run the LLaVA-Next model on (text, image) and return the predicted
    event-type label as a cleaned, lowercase string ('none' if empty)."""
    logger.info(f"处理图像: {image_path}")
    image = load_image(image_path)

    instruction = (
        "请仔细阅读以下文本和观察图像内容。请根据文本数据和图像数据辅助进行事件抽取，有以下事件抽取选项：'Justice:Arrest-Jail','Conflict:Attack','Movement:Transport','Contact:Phone-Write','Life:Die','Conflict:Demonstrate','Transaction:Transfer-Money','Contact:Meet','none'。注意，不要更改或新增其他事件选项。请在输出时仔细分析文本和图片数据。输出时直接输出选项，不需要添加其他符号。"
    )

    # Single-turn user message: task instruction, the sentence, then the image.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": instruction},
                {"type": "text", "text": text},
                {"type": "image"},
            ],
        },
    ]

    chat_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(images=image, text=chat_prompt, return_tensors="pt").to(model.device)

    # Release cached GPU memory before generating to limit VRAM growth.
    torch.cuda.empty_cache()

    generated = model.generate(**inputs, max_new_tokens=128, do_sample=False)

    # Drop the prompt tokens so only the newly generated tokens are decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated)]
    decoded = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    # Normalize the first decoded candidate; fall back to 'none' if empty.
    if decoded:
        return clean_output_text(decoded[0])
    return "none"

# Micro-averaged confusion counters for the whole run.
TP = 0
FP = 0
FN = 0

# Closed set of labels the model may predict (lowercase; mirrors the prompt).
allowed_relations = [
    'justice:arrest-jail',
    'conflict:attack',
    'movement:transport',
    'contact:phone-write',
    'life:die',
    'conflict:demonstrate',
    'transaction:transfer-money',
    'contact:meet',
    'none'
]

# Per-event-type confusion counts plus gold-label support.
event_stats = {
    relation: {"TP": 0, "FP": 0, "FN": 0, "support": 0}
    for relation in allowed_relations
}

logger.info("开始处理数据...")
print("开始处理数据...")
for i, row in data.iterrows():
    print("\n--- Example ---")
    # FIX: reuse the configured image_base_path instead of a second
    # hard-coded copy of the same directory string.
    image_path = f"{image_base_path}/{row['#1 ImageID']}"
    print(image_path)

    # Gold event type, normalized for a fair (lowercase) comparison.
    true_ee = clean_output_text(row['#3 String'])

    # Model prediction from the sentence text plus its image.
    re_result = local_ner_predict(row['#2 String'], image_path)

    # Count gold support for the true class.
    if true_ee in event_stats:
        event_stats[true_ee]["support"] += 1

    try:
        # Map a literal 'nan' prediction to np.nan so it can never equal a
        # real label below. (Removed the unused ttext local and the redundant
        # re_relation aliasing from the original.)
        clean_re_relation = convert_to_nan(re_result)
        clean_true_ee = true_ee

        print(f"清理后预测事件: {clean_re_relation}")
        print(f"清理后真实事件: {clean_true_ee}")

        # 1. Exact match -> true positive (micro and per-class).
        if clean_re_relation == clean_true_ee:
            TP += 1
            if clean_re_relation in event_stats:
                event_stats[clean_re_relation]["TP"] += 1
            print("结果: 正确 (TP)")

        # 2. Wrong non-'none' prediction -> FP for the predicted class,
        #    plus FN for the gold class when the gold label is a real event.
        elif clean_re_relation != 'none':
            FP += 1
            if clean_re_relation in event_stats:
                event_stats[clean_re_relation]["FP"] += 1

            if clean_true_ee != 'none' and clean_true_ee in event_stats:
                event_stats[clean_true_ee]["FN"] += 1
            print("结果: 错误预测 (FP)")

        # 3. Predicted 'none' while the gold label is a real event -> miss.
        elif clean_re_relation == 'none' and clean_true_ee != 'none':
            FN += 1
            if clean_true_ee in event_stats:
                event_stats[clean_true_ee]["FN"] += 1
            print("结果: 漏检 (FN)")

        # Running micro totals after this example.
        print(f"当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")

    except Exception as e:
        print("解析预测结果时发生错误:", e)
        logger.error(f"解析预测结果时发生错误: {e}")
        import traceback
        logger.error(traceback.format_exc())
        print(traceback.format_exc())
        FN += 1  # Treat parse failures as misses (FN).
        print(f"由于解析错误，将此样本计为FN。当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")
        logger.info(f"由于解析错误，将此样本计为FN。当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")
        
# --- Overall (micro) metrics ---
predicted_total = TP + FP
gold_total = TP + FN
precision = TP / predicted_total if predicted_total > 0 else 0
recall = TP / gold_total if gold_total > 0 else 0
f1_score = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0

# Emit the summary to the log file first, then to stdout (same order as before).
logger.info("\n--- 结果统计 ---")
print("\n--- 结果统计 ---")

summary_lines = [
    f"TP (True Positives): {TP}",
    f"FP (False Positives): {FP}",
    f"FN (False Negatives): {FN}",
    f"精确度 (Precision): {precision:.4f}",
    f"召回率 (Recall): {recall:.4f}",
    f"F1 值 (F1-Score): {f1_score:.4f}",
]
for line in summary_lines:
    logger.info(line)
for line in summary_lines:
    print(line)

# --- Per-event-type report ---
header_fmt = "{:<25} {:<10} {:<10} {:<10} {:<10}"
row_fmt = "{:<25} {:<10.4f} {:<10.4f} {:<10.4f} {:<10}"
dashes = "--------------------------------------------------------------------------------"
equals = "================================================================================"

logger.info("\n" + dashes)
logger.info(header_fmt.format("事件类型", "精确度", "召回率", "F1值", "支持数"))
logger.info(dashes)
print("\n" + dashes)
print(header_fmt.format("事件类型", "精确度", "召回率", "F1值", "支持数"))
print(dashes)

total_support = 0
valid_event_count = 0
macro_precision = 0
macro_recall = 0
macro_f1 = 0
weighted_precision = 0
weighted_recall = 0
weighted_f1 = 0

for event, stats in event_stats.items():
    predicted = stats["TP"] + stats["FP"]
    actual = stats["TP"] + stats["FN"]
    event_precision = stats["TP"] / predicted if predicted > 0 else 0
    event_recall = stats["TP"] / actual if actual > 0 else 0
    denom = event_precision + event_recall
    event_f1 = 2 * event_precision * event_recall / denom if denom > 0 else 0

    # Macro averages only consider classes that actually occur in the data.
    if stats["support"] > 0:
        valid_event_count += 1
        macro_precision += event_precision
        macro_recall += event_recall
        macro_f1 += event_f1

    # Weighted averages accumulate over all classes, weighted by support.
    total_support += stats["support"]
    weighted_precision += event_precision * stats["support"]
    weighted_recall += event_recall * stats["support"]
    weighted_f1 += event_f1 * stats["support"]

    row = row_fmt.format(event, event_precision, event_recall, event_f1, stats["support"])
    logger.info(row)
    print(row)

# Finalize macro averages over the classes that appeared.
if valid_event_count > 0:
    macro_precision /= valid_event_count
    macro_recall /= valid_event_count
    macro_f1 /= valid_event_count

# Finalize support-weighted averages.
if total_support > 0:
    weighted_precision /= total_support
    weighted_recall /= total_support
    weighted_f1 /= total_support

macro_row = row_fmt.format("宏平均", macro_precision, macro_recall, macro_f1, total_support)
weighted_row = row_fmt.format("加权平均", weighted_precision, weighted_recall, weighted_f1, total_support)

logger.info(dashes)
logger.info(macro_row)
logger.info(weighted_row)
logger.info(equals)
print(dashes)
print(macro_row)
print(weighted_row)
print(equals)

logger.info(f"日志已保存到: {log_file}")
print(f"日志已保存到: {log_file}")