import pandas as pd
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration
from PIL import Image
import numpy as np
import logging
import os
import datetime
from pathlib import Path
import sys

# Make sure the directory for log files exists before handlers attach.
logs_dir = Path('logs')
logs_dir.mkdir(exist_ok=True)

# Timestamped log file name so repeated runs never clobber each other.
current_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
log_file = logs_dir / f'llava_7b_EE_{current_time}.log'

# Module logger: INFO and above, mirrored to the log file and to stdout.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Remove any handlers left from a previous configuration to avoid
# duplicated log lines.
for stale in list(logger.handlers):
    logger.removeHandler(stale)

# One shared formatter carrying module/function/line context.
formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s - [%(module)s:%(funcName)s:%(lineno)d] - %(message)s'
)

# Attach the file handler first, then the stdout handler.
file_handler = logging.FileHandler(log_file)
console_handler = logging.StreamHandler(sys.stdout)
for handler in (file_handler, console_handler):
    handler.setFormatter(formatter)
    logger.addHandler(handler)

logger.info("开始运行LLaVA事件抽取任务")
print("开始运行LLaVA事件抽取任务")

# Path to the M2E2 text-event TSV. Columns used later: '#1 ImageID',
# '#2 String' (article text), '#3 String' (gold event label).
data_path = r'/mnt/6t/lyh/data/M2E2/M2E2/text/mee.tsv'
logger.info(f"数据路径: {data_path}")
print(f"数据路径: {data_path}")

data = pd.read_csv(data_path, sep='\t')
logger.info(f"读取数据集完成，共有{len(data)}条记录")
print(f"读取数据集完成，共有{len(data)}条记录")

# Local LLaVA-1.5-7B checkpoint in HF format.
model_path = '/mnt/6t/lyh/1/llava-1.5-7b-hf'
logger.info(f"模型路径: {model_path}")
print(f"模型路径: {model_path}")

# Loading options: automatic dtype selection and device placement
# (device_map="auto" shards across available GPUs via accelerate).
load_config = {
    "torch_dtype": "auto",
    "device_map": "auto"
}

logger.info("开始加载模型...")
print("开始加载模型...")
processor = AutoProcessor.from_pretrained(model_path)
model = LlavaForConditionalGeneration.from_pretrained(
    model_path,
    **load_config
)
logger.info("模型加载完成")
print("模型加载完成")

# Root directory containing the M2E2 images; joined with each row's
# '#1 ImageID' in the evaluation loop.
image_base_path = '/mnt/6t/lyh/data/M2E2/M2E2/image/image/image'
logger.info(f"图像路径: {image_base_path}")
print(f"图像路径: {image_base_path}")

def load_image(image_path):
    """Open an image file and return it as an RGB PIL image.

    Returns the sentinel string "2" (understood by callers as
    "image unavailable, skip this sample") when the file is missing,
    corrupt, or not a recognizable image.
    """
    try:
        return Image.open(image_path).convert("RGB")
    except (IOError, ValueError, Image.UnidentifiedImageError) as err:
        logger.error(f"图像文件损坏或无法读取: {image_path}, 错误信息: {err}")
        print(f"图像文件损坏或无法读取，跳过当前数据：{image_path}, 错误信息：{err}")
        return "2"  # sentinel: image load failure

def convert_to_nan(value):
    """Return np.nan when *value* is the literal string 'nan' (any casing);
    any other value passes through unchanged."""
    looks_like_nan = isinstance(value, str) and value.lower() == 'nan'
    return np.nan if looks_like_nan else value

def clean_output_text(text):
    """Normalize a model output (or gold label) for case-insensitive comparison.

    Removes literal escape sequences, surrounding whitespace and quotes,
    collapses internal whitespace runs, and lowercases the result.

    Args:
        text: Raw text. ``None`` maps to ``'none'``; non-string values
            (e.g. a pandas NaN float read from the TSV) are coerced with
            ``str()`` instead of raising AttributeError.

    Returns:
        A lowercase, whitespace-normalized string.
    """
    if text is None:
        return 'none'
    if not isinstance(text, str):
        # Robustness: gold labels read via pandas can arrive as floats (NaN).
        text = str(text)

    # Remove literal escape sequences the model sometimes emits verbatim.
    text = text.replace('\\n', '').replace('\\t', '').replace('\\r', '')
    # Strip whitespace BEFORE quotes: previously a trailing newline in
    # outputs like '"Label"\n' shielded the closing quote from strip(),
    # leaving a stray quote that broke label matching.
    text = text.strip()
    text = text.strip('"\'')
    text = text.strip()
    # Collapse runs of whitespace to single spaces.
    text = ' '.join(text.split())
    # Lowercase so comparisons ignore casing.
    return text.lower()

def local_ee_predict(text, image_path):
    """Run LLaVA event extraction for one (text, image) sample.

    Args:
        text: Article sentence to classify.
        image_path: Path to the paired image file.

    Returns:
        The cleaned (lowercased) predicted event label on success, or a
        sentinel string: "2" = image load failure, "1" = CUDA out of
        memory, "error" = any other generation failure.
    """
    logger.info(f"处理图像: {image_path}")
    raw_image = load_image(image_path)
    
    # load_image signals an unreadable image with the sentinel string "2".
    if raw_image == "2":
        logger.error(f"图像加载失败: {image_path}")
        return "2"  # sentinel: image load failure

    # Chinese instruction prompt: pick exactly one label from the closed
    # event set (or 'none') and output only the label, no extra symbols.
    prompt = (
        "请仔细阅读以下文本和观察图像内容。请根据文本数据和图像数据辅助进行事件抽取，有以下事件抽取选项：'Justice:Arrest-Jail','Conflict:Attack','Movement:Transport','Contact:Phone-Write','Life:Die','Conflict:Demonstrate','Transaction:Transfer-Money','Contact:Meet','none'。注意，不要更改或新增其他事件选项。请在输出时仔细分析文本和图片数据。输出时直接输出选项，不需要添加其他符号。"
    )

    # Chat-format conversation: instruction, article text, then the image slot.
    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "text", "text": text},
                {"type": "image"},
            ],
        },
    ]

    # Render the chat template (rebinding `prompt`) and tokenize text+image;
    # move the tensors to the model's device.
    prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
    inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device)

    # Release cached CUDA blocks so VRAM does not creep upward across samples.
    torch.cuda.empty_cache()
    
    try:
        # Greedy decoding, at most 128 new tokens.
        generated_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)

        # Keep only newly generated tokens (drop the echoed prompt prefix).
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]

        # Decode the generated ids back to text.
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        # Log the raw model output before any cleanup.
        raw_output = output_text[0] if output_text else ""
        logger.info(f"LLaVA EE模型原始输出: '{raw_output}'")

        # Normalize (strip escapes/quotes/whitespace, lowercase) for comparison.
        cleaned_output = clean_output_text(raw_output)

        return cleaned_output

    except torch.cuda.OutOfMemoryError:
        logger.error(f"显存不足，跳过当前数据：{image_path}")
        print(f"显存不足，跳过当前数据：{image_path}")
        torch.cuda.empty_cache()
        return "1"  # sentinel: CUDA out of memory
    except Exception as e:
        logger.error(f"模型生成时发生错误: {e}")
        print(f"模型生成时发生错误: {e}")
        return "error"  # sentinel: other generation error

# Micro-average confusion counters shared across the evaluation loop.
TP = 0
FP = 0
FN = 0

# Closed label set the model is instructed to choose from, lowercased to
# match clean_output_text's normalization.
allowed_events = [
    'justice:arrest-jail',
    'conflict:attack',
    'movement:transport',
    'contact:phone-write',
    'life:die',
    'conflict:demonstrate',
    'transaction:transfer-money',
    'contact:meet',
    'none'
]

# Per-class confusion counts plus gold-label support, one entry per label.
event_stats = {
    name: {"TP": 0, "FP": 0, "FN": 0, "support": 0} for name in allowed_events
}

logger.info("开始处理数据...")
print("开始处理数据...")

# Main evaluation loop: one prediction per TSV row; updates the micro
# counters (TP/FP/FN) and the per-class counts in event_stats.
for i, row in data.iterrows():
    print(f"\n--- Example {i+1}/{len(data)} ---")

    # Build the full image path from the base dir and the row's image id.
    image_path = f"{image_base_path}/{row['#1 ImageID']}"
    print(f"图像路径: {image_path}")
    
    # Gold event label, normalized to lowercase for comparison.
    # NOTE(review): this call runs OUTSIDE the try below — a non-string
    # label (e.g. pandas NaN) would raise and abort the whole run;
    # verify the '#3 String' column is always populated.
    true_ee = clean_output_text(row['#3 String'])

    # Skip samples whose image file is missing (excluded from all metrics).
    if not os.path.exists(image_path):
        logger.error(f"样本[{i+1}] 错误: 图像不存在 - {image_path}")
        print(f"错误: 图像文件不存在: {image_path}")
        continue

    # Count gold-label support for the per-class report.
    if true_ee in event_stats:
        event_stats[true_ee]["support"] += 1
    
    # Run event extraction and update the confusion counters.
    try:
        # Console shows only the first 100 chars of the article text.
        print(f"原文: {row['#2 String'][:100]}...")

        # Predict the event label (or a sentinel string on failure).
        ee_result = local_ee_predict(row['#2 String'], image_path)

        # Map a literal 'nan' prediction to a real NaN.
        # NOTE(review): sentinel returns ("1", "2", "error") are not
        # filtered here; they differ from every label and from 'none',
        # so they land in the FP branch below.
        ee_relation = ee_result
        ee_relation = convert_to_nan(ee_relation)
        
        # Both sides were already normalized by clean_output_text.
        clean_ee_relation = ee_relation  # already cleaned
        clean_true_ee = true_ee  # already cleaned
        
        print(f"预测事件: {clean_ee_relation}")
        print(f"真实事件: {clean_true_ee}")

        # Record the gold/predicted pair in the log.
        logger.info(f"样本[{i+1}] 真实事件: '{clean_true_ee}' | 预测事件: '{clean_ee_relation}'")

        # Case 1: exact match counts as TP (including 'none' == 'none').
        if clean_ee_relation == clean_true_ee:
            TP += 1
            if clean_ee_relation in event_stats:
                event_stats[clean_ee_relation]["TP"] += 1
            print("结果: 正确 (TP)")
            
        # Case 2: wrong non-'none' prediction → FP for the predicted class.
        elif clean_ee_relation != 'none':
            FP += 1
            if clean_ee_relation in event_stats:
                event_stats[clean_ee_relation]["FP"] += 1
            
            # The gold class (when not 'none') also misses a detection.
            if clean_true_ee != 'none' and clean_true_ee in event_stats:
                event_stats[clean_true_ee]["FN"] += 1
            print("结果: 错误预测 (FP)")
            
        # Case 3: predicted 'none' while an event exists → FN.
        elif clean_ee_relation == 'none' and clean_true_ee != 'none':
            FN += 1
            if clean_true_ee in event_stats:
                event_stats[clean_true_ee]["FN"] += 1
            print("结果: 漏检 (FN)")

        # Running totals on the console.
        print(f"当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")

        # Mirror the per-sample outcome and running totals into the log.
        logger.info(f"样本[{i+1}] 结果: {'TP' if clean_ee_relation == clean_true_ee else 'FP' if clean_ee_relation != 'none' else 'FN'} | 当前统计: TP={TP}, FP={FP}, FN={FN}")

    except Exception as e:
        logger.error(f"样本[{i+1}] 处理错误: {e}")
        print(f"处理样本时发生错误: {e}")
        import traceback
        logger.error(traceback.format_exc())
        print(traceback.format_exc())
        FN += 1  # processing/parsing errors are scored as misses
        print(f"由于解析错误，将此样本计为FN。当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")
        logger.info(f"由于解析错误，将此样本计为FN。当前累计 - TP: {TP}, FP: {FP}, FN: {FN}")

    # Blank log line between samples.
    logger.info("")

# Micro-averaged metrics over all processed samples (zero-division safe).
predicted_positives = TP + FP
gold_positives = TP + FN
precision = TP / predicted_positives if predicted_positives > 0 else 0
recall = TP / gold_positives if gold_positives > 0 else 0
f1_score = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0

# Final summary: all lines go to the log first, then to the console,
# preserving the original output order.
summary = [
    f"TP (True Positives): {TP}",
    f"FP (False Positives): {FP}",
    f"FN (False Negatives): {FN}",
    f"精确度 (Precision): {precision:.4f}",
    f"召回率 (Recall): {recall:.4f}",
    f"F1 值 (F1-Score): {f1_score:.4f}",
]
logger.info("\n--- 结果统计 ---")
print("\n--- 结果统计 ---")
for line in summary:
    logger.info(line)
for line in summary:
    print(line)

# Per-event-type report header, mirrored to log and console.
separator = "--------------------------------------------------------------------------------"
header = "{:<25} {:<10} {:<10} {:<10} {:<10}".format("事件类型", "精确度", "召回率", "F1值", "支持数")
logger.info("\n" + separator)
logger.info(header)
logger.info(separator)
print("\n" + separator)
print(header)
print(separator)

# Accumulators for macro (mean over observed classes) and weighted
# (support-weighted) averages.
total_support = 0
macro_precision = macro_recall = macro_f1 = 0
weighted_precision = weighted_recall = weighted_f1 = 0
valid_event_count = 0

for event, stats in event_stats.items():
    tp, fp, fn, support = stats["TP"], stats["FP"], stats["FN"], stats["support"]

    # Per-class precision / recall / F1 with zero-division guards.
    event_precision = tp / (tp + fp) if tp + fp else 0
    event_recall = tp / (tp + fn) if tp + fn else 0
    pr_sum = event_precision + event_recall
    event_f1 = 2 * event_precision * event_recall / pr_sum if pr_sum else 0

    # Macro averages consider only classes that actually occur in the data.
    if support > 0:
        valid_event_count += 1
        macro_precision += event_precision
        macro_recall += event_recall
        macro_f1 += event_f1

    # Weighted sums accumulate support-weighted metrics for every class.
    total_support += support
    weighted_precision += event_precision * support
    weighted_recall += event_recall * support
    weighted_f1 += event_f1 * support

    row_text = "{:<25} {:<10.4f} {:<10.4f} {:<10.4f} {:<10}".format(
        event, event_precision, event_recall, event_f1, support)
    logger.info(row_text)
    print(row_text)

# Turn accumulated sums into macro (per-class mean over observed classes)
# and weighted (support-weighted) averages, guarding empty denominators.
if valid_event_count > 0:
    macro_precision = macro_precision / valid_event_count
    macro_recall = macro_recall / valid_event_count
    macro_f1 = macro_f1 / valid_event_count

if total_support > 0:
    weighted_precision = weighted_precision / total_support
    weighted_recall = weighted_recall / total_support
    weighted_f1 = weighted_f1 / total_support

# Closing rows of the per-class report: all log lines first, then the
# console lines, preserving the original output order.
divider = "--------------------------------------------------------------------------------"
closing = "================================================================================"
row_fmt = "{:<25} {:<10.4f} {:<10.4f} {:<10.4f} {:<10}"
macro_row = row_fmt.format("宏平均", macro_precision, macro_recall, macro_f1, total_support)
weighted_row = row_fmt.format("加权平均", weighted_precision, weighted_recall, weighted_f1, total_support)

for emit in (logger.info, print):
    emit(divider)
    emit(macro_row)
    emit(weighted_row)
    emit(closing)

logger.info(f"日志已保存到: {log_file}")
print(f"日志已保存到: {log_file}")
