import torch
from PIL import Image
from transformers import AutoTokenizer, AutoProcessor, LlavaForConditionalGeneration
import pandas as pd
import re
import os
import logging
from datetime import datetime
from collections import defaultdict

class EventMetrics:
    """Accumulates per-event-type confusion counts and derives P/R/F1.

    Samples whose gold label is 'none' or one of the error sentinels are
    skipped entirely; everything else updates TP/FP/FN and support counts
    for a fixed, closed set of event types.
    """

    def __init__(self):
        # Closed set of event types being scored.
        self.event_types = [
            'Contact:Meet',
            'Justice:Arrest-Jail',
            'Conflict:Attack',
            'Conflict:Demonstrate',
            'Movement:Transport',
            'Contact:Phone-Write',
            'Life:Die',
            'Transaction:Transfer-Money',
        ]
        # Per-type confusion counters plus gold-label support ('total').
        self.metrics = {}
        for name in self.event_types:
            self.metrics[name] = {'TP': 0, 'FP': 0, 'FN': 0, 'total': 0}

    def update(self, true_event, predicted_event):
        """Fold one (gold, prediction) pair into the counters."""
        # Ignore samples with no gold event or with error sentinels.
        if true_event == 'none' or true_event in ['未识别', '显存不足', '图像加载失败']:
            return

        counters = self.metrics.get(true_event)
        if counters is not None:
            # Count support and score the prediction against the gold label.
            counters['total'] += 1
            if predicted_event == true_event:
                counters['TP'] += 1
            else:
                counters['FN'] += 1

        # A wrong prediction of a tracked type is a false positive for it.
        if predicted_event != true_event and predicted_event in self.metrics:
            self.metrics[predicted_event]['FP'] += 1

    def calculate_metrics(self):
        """Return a list of per-type dicts: precision, recall, f1, support."""
        summary = []
        for name in self.event_types:
            counts = self.metrics[name]
            tp, fp, fn = counts['TP'], counts['FP'], counts['FN']

            prec = tp / (tp + fp) if tp + fp else 0.0
            rec = tp / (tp + fn) if tp + fn else 0.0
            f1 = (2 * prec * rec / (prec + rec)) if prec + rec else 0.0

            summary.append({
                'event_type': name,
                'precision': prec,
                'recall': rec,
                'f1': f1,
                'support': counts['total'],
            })
        return summary

    def print_metrics(self):
        """Log a formatted per-type metrics table."""
        rows = self.calculate_metrics()
        rule = "-" * 80

        # Table header.
        logging.info("")
        logging.info(rule)
        logging.info(f"{'事件类型':<30} {'精确度':>10} {'召回率':>10} {'F1值':>10} {'支持数':>10}")
        logging.info(rule)

        # One row per event type.
        for row in rows:
            logging.info(f"{row['event_type']:<30} "
                        f"{row['precision']:>10.4f} "
                        f"{row['recall']:>10.4f} "
                        f"{row['f1']:>10.4f} "
                        f"{row['support']:>10}")

        logging.info(rule)

# Logging configuration
def setup_logger():
    """Configure root logging to a timestamped file plus the console.

    Replaces any previously installed handlers on the root logger and
    returns the path of the log file that was created.
    """
    # Make sure the log directory exists.
    os.makedirs('logs', exist_ok=True)

    # Timestamped log-file name for this run.
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    log_file = f'logs/llava_gemma_2b_event_extraction_{stamp}.log'

    # One shared format for both destinations.
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    handlers = [
        logging.FileHandler(log_file, encoding='utf-8'),
        logging.StreamHandler(),
    ]

    root = logging.getLogger()
    root.setLevel(logging.INFO)

    # Drop whatever handlers were registered before installing ours.
    for stale in list(root.handlers):
        root.removeHandler(stale)

    for handler in handlers:
        handler.setFormatter(formatter)
        root.addHandler(handler)

    logging.info(f"===== 事件抽取任务开始 =====")
    logging.info(f"日志文件创建: {log_file}")
    return log_file

# Event-extraction prompt (Chinese). It instructs the model to read the text
# and image, pick exactly ONE label from the closed event-type set (or 'none'),
# and output nothing else — no explanation, no extra symbols. The string is
# runtime data consumed by the model and must not be translated or reformatted.
PROMPT = """请仔细阅读以下文本和观察图像内容。输出中不要做其他任何的解释和分析。请根据文本数据和图像数据辅助进行事件抽取，只有以下事件类型可以被选项，不要做其他任何的解释和分析：'Justice:Arrest-Jail','Conflict:Attack','Movement:Transport','Contact:Phone-Write','Life:Die','Conflict:Demonstrate','Transaction:Transfer-Money','Contact:Meet','none'。注意，不要更改或新增其他事件选项。请在输出时仔细分析文本和图片数据。输出时直接输出一种事件选项，不需要添加其他符号。1.一定不要更改或新增其他事件类型。2.请在返回的时候统一格式,仅输出一种事件类型,例如:'Justice:Arrest-Jail'。不要额外解释.仔细分辨每个事件类型的区别。不要做其他任何的解释和分析，这是最重要的一点"""

# Dataset locations: M2E2 TSV annotations and the image directory.
# NOTE: machine-specific absolute paths — adjust when running elsewhere.
DATA_PATH = "/mnt/6t/lyh/1/M2E2/M2E2/text/mee.tsv"
IMAGE_DIR = "/mnt/6t/lyh/1/M2E2/M2E2/image/image/image1"

# Load the model and its processor
def load_model(model_path):
    """Load the LLaVA model and its matching processor.

    Args:
        model_path: Local directory (or hub id) of the pretrained checkpoint.

    Returns:
        A ``(model, processor)`` tuple ready for inference.
    """
    logging.info(f"正在加载模型: {model_path}")
    model = LlavaForConditionalGeneration.from_pretrained(
        model_path,
        torch_dtype="auto",   # pick the dtype stored in the checkpoint
        device_map="auto"     # place/shard across available devices
    )
    # FIX: `torch_dtype` and `device_map` are model-loading kwargs; they are
    # meaningless for a processor and were previously passed here by mistake.
    processor = AutoProcessor.from_pretrained(model_path)
    logging.info(f"模型加载完成")
    return model, processor

# Load and preprocess an image
def load_image(image_path):
    """Open *image_path*, force RGB mode, and resize to 336x336.

    Returns the prepared PIL image, or ``None`` when anything fails
    (the error is logged rather than raised).
    """
    try:
        img = Image.open(image_path)

        # The model expects 3-channel input; convert any other mode.
        if img.mode != 'RGB':
            img = img.convert('RGB')

        # Normalize to a fixed resolution; LANCZOS keeps resampling
        # artifacts low. Adjust the size here if the model changes.
        img = img.resize((336, 336), Image.Resampling.LANCZOS)

        return img
    except Exception as e:
        logging.error(f"加载图片时出错 {image_path}: {e}")
        return None

def predict_event(text, image_path, model, processor):
    """Predict the event type for a text/image pair.

    NOTE(review): DEAD CODE — this definition is shadowed by a second
    ``predict_event`` defined later in this file, and that later one is what
    actually runs. The two differ (this version passes padding/truncation
    and explicit deterministic-generation flags); consolidate them.
    """
    # Load and preprocess the image; return a sentinel string on failure.
    image = load_image(image_path)
    if image is None:
        logging.error(f"图像加载失败: {image_path}")
        return "图像加载失败"

    try:
        # Build the chat-formatted prompt containing the <image> placeholder.
        prompt = processor.tokenizer.apply_chat_template(
            [
                {
                    'role': 'user',                 
                    'content': (
                        f"{PROMPT}\n<image>\n{text}"
                    )
                }
            ],
            tokenize=False,
            add_generation_prompt=True
        )

        # Prepare model inputs (tokenized text + pixel values) on the
        # model's device.
        inputs = processor(
            text=prompt, 
            images=image, 
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512  # adjust as needed
        ).to(model.device)
        
        # Greedy, deterministic generation.
        logging.info(f"生成预测结果中...")
        generate_ids = model.generate(
            **inputs,
            max_new_tokens=64,
            do_sample=False,
            num_beams=1
        )
        # Drop the prompt tokens so only newly generated tokens are decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generate_ids)
        ]

        # Decode the generated tokens back to text.
        output_text = processor.batch_decode(
            generated_ids_trimmed, 
            skip_special_tokens=True, 
            clean_up_tokenization_spaces=False
        )

        # Extract the canonical event label from the raw output.
        extracted_event = extract_event_type(output_text[0]) if output_text else "未识别"
        logging.info(f"提取的事件类型: {extracted_event}")
        
        # Escape newlines so the raw output stays on one log line.
        output_str = str(output_text).replace('\n', '\\n')
        logging.debug(f"模型原始输出: {output_str}")
        
        return extracted_event
        
    except torch.cuda.OutOfMemoryError:
        # Out of GPU memory: free the cache and skip this sample.
        logging.error(f"显存不足，跳过当前数据：{image_path}")
        torch.cuda.empty_cache()
        return "显存不足"
    except Exception as e:
        logging.error(f"预测过程中出错: {e}")
        return f"预测错误: {str(e)}"

# Extract the canonical event label from the model's raw decoded output
def extract_event_type(text):
    """Return the event-type label contained in raw model output *text*.

    The model is prompted to answer with exactly one event type, so only the
    first line is kept (later lines are stray explanations) and all spaces
    are removed so e.g. ``"Life: Die"`` matches the canonical ``"Life:Die"``.

    FIX: the previous version returned ``'none'`` for ANY reply without a
    newline, silently discarding valid single-line answers — which is the
    exact format the prompt asks for. Now single-line replies are normalized
    the same way; only an empty reply maps to ``'none'``.
    """
    logging.info(f"output_text:{text}")
    # Keep the first line only (split once; the rest is discarded).
    first_line = text.split('\n', 1)[0].strip()
    # Remove all spaces to match the canonical label format.
    extracted = first_line.replace(" ", "")
    logging.debug(f"从'{text}'中提取第一行事件类型: '{extracted}'")
    # An empty reply means no usable prediction.
    return extracted if extracted else "none"

# Run event-extraction prediction for one sample.
# NOTE(review): a near-identical `predict_event` is defined earlier in this
# file; this later definition shadows it and is the one actually executed.
# The two should be consolidated.
def predict_event(text, image_path, model, processor):
    """Predict the event type for a text/image pair.

    Args:
        text: Sentence to classify.
        image_path: Path of the accompanying image.
        model: Loaded ``LlavaForConditionalGeneration``.
        processor: Matching ``AutoProcessor``.

    Returns:
        The extracted event-type string, or a sentinel on failure
        ('图像加载失败', '显存不足', '预测错误: ...').
    """
    # Load and preprocess the image; return a sentinel string on failure.
    image = load_image(image_path)
    if image is None:
        logging.error(f"图像加载失败: {image_path}")
        return "图像加载失败"

    # FIX: prompt construction and input preparation were previously OUTSIDE
    # the try block, so a CUDA OOM raised while building inputs or moving
    # tensors to the device escaped this function's own error handling.
    try:
        # Build the chat-formatted prompt containing the <image> placeholder.
        prompt = processor.tokenizer.apply_chat_template(
            [
                {
                    'role': 'user',                 
                    'content': (
                        f"{PROMPT}\n<image>\n{text}"
                    )
                }
            ],
            tokenize=False,
            add_generation_prompt=True
        )

        # Prepare model inputs (tokenized text + pixel values) on the
        # model's device — can itself raise CUDA OOM.
        inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        # Generate the prediction.
        logging.info(f"生成预测结果中...")
        generate_ids = model.generate(**inputs, max_new_tokens=64)
        # Drop the prompt tokens so only newly generated tokens are decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generate_ids)
        ]

        # Decode the generated tokens back to text.
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        # Extract the canonical event label from the raw output.
        extracted_event = extract_event_type(output_text[0]) if output_text else "未识别"
        logging.info(f"提取的事件类型: {extracted_event}")
        
        # Escape newlines so the raw output stays on one log line.
        output_str = str(output_text).replace('\n', '\\n')
        logging.debug(f"模型原始输出: {output_str}")
        
        return extracted_event
    except torch.cuda.OutOfMemoryError:
        # Out of GPU memory: free the cache and skip this sample.
        logging.error(f"显存不足，跳过当前数据：{image_path}")
        torch.cuda.empty_cache()
        return "显存不足"
    except Exception as e:
        logging.error(f"预测过程中出错: {e}")
        return f"预测错误: {str(e)}"

def main():
    """Run event extraction over the M2E2 dataset and report P/R/F1 metrics."""
    # Configure logging (timestamped file + console).
    log_file = setup_logger()
    
    # Model checkpoint location. NOTE: machine-specific absolute path.
    model_path = "/mnt/6t/lyh/1/llava-gemma-2b"
    
    # Load the model and processor.
    model, processor = load_model(model_path)
    
    # Load the TSV annotation file.
    logging.info(f"正在加载数据: {DATA_PATH}")
    data = pd.read_csv(DATA_PATH, sep='\t')
    logging.info(f"数据加载完成，共{len(data)}条记录")
    
    # Per-sample result records, later dumped to CSV.
    results = []
    
    # Global confusion counters across all samples.
    TP = 0  # true positives
    FP = 0  # false positives
    FN = 0  # false negatives
    
    # Per-event-type metrics accumulator.
    event_metrics = EventMetrics()
    
    # Iterate over the dataset, predicting one sample at a time.
    total = len(data)
    
    for i, row in data.iterrows():
        logging.info("")
        logging.info(f"{'='*50}")
        logging.info(f"示例 {i+1}/{total}")
        logging.info(f"{'='*50}")
        
        # Expected TSV columns: '#1 ImageID', '#2 String' (text),
        # '#3 String' (gold event label).
        image_id = row['#1 ImageID']
        text = row['#2 String']
        true_event = row['#3 String'].replace(" ", "")  # strip spaces from the gold label
        
        image_path = os.path.join(IMAGE_DIR, image_id)
        
        logging.info(f"真实事件类型: {true_event}")
        
        # Predict the event type for this text/image pair.
        logging.info(f"开始预测...")
        predicted_event = predict_event(text, image_path, model, processor)
        logging.info(f"预测事件类型: {predicted_event}")
        
        # Update per-event-type counters.
        event_metrics.update(true_event, predicted_event)
        
        # Classify this sample as TP/FP/FN/TN for the global counters.
        # NOTE(review): a 'none' == 'none' match falls into the TP branch
        # below; standard event-extraction scoring treats that as a true
        # negative — confirm the resulting precision/recall inflation is
        # intended.
        if predicted_event == true_event:
            TP += 1
            result_type = "TP"
        elif predicted_event not in ['none', 'null', '未识别', '显存不足'] and predicted_event != true_event:
            FP += 1
            result_type = "FP"
        elif predicted_event in ['none', 'null', '未识别', '显存不足'] and true_event != 'none':
            FN += 1
            result_type = "FN"
        else:
            result_type = "TN"  # true negative, excluded from the metrics
        
        # Running precision/recall/F1 after this sample.
        precision = TP / (TP + FP) if TP + FP > 0 else 0
        recall = TP / (TP + FN) if TP + FN > 0 else 0
        f1 = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0
        
        logging.info(f"结果类型: {result_type}")
        logging.info(f"当前统计: TP={TP}, FP={FP}, FN={FN}")
        logging.info(f"当前指标: Precision={precision:.4f}, Recall={recall:.4f}, F1={f1:.4f}")
        logging.info(f"{'='*50}")
        
        # Record this sample's outcome (counters are running totals).
        results.append({
            'image_id': image_id,
            'text': text,
            'true_event': true_event,
            'predicted_event': predicted_event,
            'result_type': result_type,
            'is_correct': predicted_event == true_event,
            'TP': TP,
            'FP': FP,
            'FN': FN,
            'precision': precision,
            'recall': recall,
            'f1': f1
        })
    
    # Final overall metrics.
    precision = TP / (TP + FP) if TP + FP > 0 else 0
    recall = TP / (TP + FN) if TP + FN > 0 else 0
    f1_score = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0
    
    logging.info("")
    logging.info(f"{'#'*60}")
    logging.info(f"总体结果统计")
    logging.info(f"{'#'*60}")
    logging.info(f"总样本数: {total}")
    logging.info(f"TP (真正例): {TP}")
    logging.info(f"FP (假正例): {FP}")
    logging.info(f"FN (假负例): {FN}")
    logging.info(f"精确率 (Precision): {precision:.4f}")
    logging.info(f"召回率 (Recall): {recall:.4f}")
    logging.info(f"F1 值 (F1-Score): {f1_score:.4f}")
    logging.info(f"{'#'*60}")
    
    # Per-event-type metrics table.
    logging.info("")
    logging.info(f"{'#'*60}")
    logging.info(f"各事件类型指标统计")
    logging.info(f"{'#'*60}")
    event_metrics.print_metrics()
    
    # Persist per-sample results to a timestamped CSV.
    results_file = f'event_extraction_results_{datetime.now().strftime("%Y%m%d_%H%M%S")}.csv'
    results_df = pd.DataFrame(results)
    results_df.to_csv(results_file, index=False)
    logging.info(f"结果已保存至 {results_file}")
    logging.info(f"日志已保存至 {log_file}")
    logging.info(f"{'='*60}")
    logging.info("事件抽取任务完成")
    logging.info(f"{'='*60}")

if __name__ == "__main__":
    main()