import json

import numpy as np
import torch
from sklearn.metrics import classification_report
from torch.utils.data import DataLoader
from transformers import BertForTokenClassification, BertTokenizer, BertTokenizerFast

from train import Config, TargetExtractionDataset, evaluate

config = Config()

def load_test_data(file_path):
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)

def extract_targets(model, tokenizer, text, device):
    """从文本中提取评价对象"""
    # 预处理输入
    encoding = tokenizer(
        text,
        return_offsets_mapping=True,
        truncation=True,
        max_length=config.max_length,
        padding='max_length',
        return_tensors='pt'
    )
    
    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)
    offset_mapping = encoding['offset_mapping'][0].numpy()
    
    # 预测
    model.eval()
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        predictions = torch.argmax(outputs.logits, dim=2)
    
    # 后处理预测结果
    pred_labels = predictions[0].cpu().numpy()
    targets = []
    current_target = ""
    current_indices = []
    
    for idx, (pred, offset) in enumerate(zip(pred_labels, offset_mapping)):
        # 跳过特殊token和padding
        if offset[0] == 0 and offset[1] == 0:
            continue
            
        # 处理B-TARGET标签
        if pred == 1:  # B-TARGET
            if current_target:
                targets.append((current_target, current_indices))
                current_target = ""
                current_indices = []
            
            char_start, char_end = offset
            if char_start < len(text) and char_end <= len(text):
                current_target = text[char_start:char_end]
                current_indices = [char_start, char_end]
                
        # 处理I-TARGET标签
        elif pred == 2:  # I-TARGET
            char_start, char_end = offset
            if char_start < len(text) and char_end <= len(text):
                current_target += text[char_start:char_end]
                current_indices[1] = char_end
    
    # 添加最后一个目标(如果有)
    if current_target:
        targets.append((current_target, current_indices))
    
    # 返回所有识别出的评价对象
    return [target for target, _ in targets]

def main():
    # 加载测试数据
    test_data = load_test_data('data/test_processed.json')
    
    # 加载模型和tokenizer
    model_path = f"{config.output_dir}/best_model"
    tokenizer_path = f"{config.output_dir}/tokenizer"
    
    tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
    model = BertForTokenClassification.from_pretrained(model_path)
    model.to(config.device)
    
    # 评估模型
    test_dataset = TargetExtractionDataset(test_data, tokenizer, config.max_length)
    test_dataloader = DataLoader(test_dataset, batch_size=config.batch_size)
    
    test_metrics = evaluate(model, test_dataloader, config.device)
    print("测试集评估结果:")
    print(f"准确率: {test_metrics['accuracy']:.4f}")
    print(f"精确率: {test_metrics['precision']:.4f}")
    print(f"召回率: {test_metrics['recall']:.4f}")
    print(f"F1分数: {test_metrics['f1']:.4f}")
    
    # 在测试集上进行评价对象提取
    correct = 0
    total = len(test_data)
    
    results = []
    for item in test_data:
        sentence = item['sentence']
        true_target = item['target']
        
        # 预测评价对象
        predicted_targets = extract_targets(model, tokenizer, sentence, config.device)
        
        # 检查是否正确提取
        is_correct = true_target in predicted_targets
        if is_correct:
            correct += 1
            
        # 记录结果
        results.append({
            "sentence": sentence,
            "true_target": true_target,
            "predicted_targets": predicted_targets,
            "is_correct": is_correct
        })
    
    # 计算准确率
    accuracy = correct / total
    print(f"评价对象提取准确率: {accuracy:.4f}")
    
    # 保存评估结果
    with open('data/evaluation_results.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

if __name__ == "__main__":
    main()