import json
import torch
import numpy as np
import time
from tqdm import tqdm
from colorama import Fore, Style, init
from bert import Bert
from utils import convert_examples_to_features
from transformers import BertTokenizer
from run_ernie import ErnieConfig
from run_large_roberta_pair import RobertaPairConfig
from run_large_roberta_wwm_ext import RobertaLargeConfig

# Enable colorama so ANSI color codes reset automatically after each print
init(autoreset=True)

# Startup banner
banner = "=" * 70
print(Fore.CYAN + banner)
print(Fore.YELLOW + "🚀 文本相似度预测系统启动")
print(Fore.CYAN + banner)

# Which model variant to run, and the path to its trained weights
model_name = 'wwm'
model_path = '../my_model/best_roberta_wwm_large.pkl'

# Instantiate the configuration class matching the selected model name.
# Fail fast on an unknown name instead of letting a None config propagate
# into Bert(config) and blow up later with a confusing AttributeError.
if model_name == 'ernie':
    config = ErnieConfig()
    print(Fore.GREEN + f"✅ 使用ERNIE模型配置 | 最大序列长度: {config.pad_size}")
elif model_name == 'pair':
    config = RobertaPairConfig()
    print(Fore.GREEN + f"✅ 使用RobertaPair模型配置 | 最大序列长度: {config.pad_size}")
elif model_name == 'wwm':
    config = RobertaLargeConfig()
    print(Fore.GREEN + f"✅ 使用RobertaLarge模型配置 | 最大序列长度: {config.pad_size}")
else:
    raise ValueError(
        f"Unknown model_name {model_name!r}: expected 'ernie', 'pair' or 'wwm'"
    )

# Pick the compute device (prefer GPU when available) and report which one.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
device_name = 'CUDA' if use_cuda else 'CPU'
gpu_detail = ' (' + torch.cuda.get_device_name(0) + ')' if use_cuda else ''
print(Fore.BLUE + f"💻 计算设备: {device_name}{gpu_detail}")

# Build the model and move its parameters onto the chosen device
model = Bert(config).to(device)

# --- Load trained weights ---------------------------------------------------
print(Fore.MAGENTA + "⏳ 加载模型权重...", end='')
weights_t0 = time.time()
state_dict = torch.load(model_path, map_location=device)

# Drop position_ids if the checkpoint carries it — some transformers
# versions no longer expect this key, and strict loading would reject it.
state_dict.pop('bert.embeddings.position_ids', None)

# Restore parameters and switch to inference mode (disables dropout etc.)
model.load_state_dict(state_dict)
model.eval()
print(Fore.GREEN + f" 完成! 耗时: {time.time() - weights_t0:.2f}s")

# --- Tokenizer --------------------------------------------------------------
print(Fore.MAGENTA + "🔤 初始化分词器...", end='')
tokenizer_t0 = time.time()
tokenizer = BertTokenizer.from_pretrained(config.tokenizer_file)
print(Fore.GREEN + f" 完成! 耗时: {time.time() - tokenizer_t0:.2f}s")

# ---------------------------------------------------------------------------
# Prediction: read the KUAKE-QQR test set, classify each query pair, and
# write the labelled records back out as JSON.
# ---------------------------------------------------------------------------
test_file = '../data/KUAKE/KUAKE-QQR_test.json'
output_file = f'../prediction_result/{model_name}_pred.json'

print(Fore.CYAN + "\n" + "=" * 70)
print(Fore.YELLOW + f"📊 开始预测 | 测试文件: {test_file}")
print(Fore.CYAN + "=" * 70)


def _predict_label(query1, query2):
    """Return the predicted class index (plain int) for one query pair.

    Uses the module-level model / tokenizer / config / device. The dummy
    label 0 passed to convert_examples_to_features is ignored for
    data_type='test'.
    """
    feature = convert_examples_to_features(
        examples=[[query1, query2, 0]],
        tokenizer=tokenizer,
        max_length=config.pad_size,
        data_type='test',
    )[0]

    # Batch of one; tensors must live on the same device as the model.
    # (feature fields are plain sequences — no need to round-trip via numpy.)
    input_ids = torch.tensor(feature.input_ids).unsqueeze(0).to(device)
    attention_mask = torch.tensor(feature.attention_mask).unsqueeze(0).to(device)
    token_type_ids = torch.tensor(feature.token_type_ids).unsqueeze(0).to(device)

    with torch.no_grad():
        output, _ = model(input_ids, attention_mask, token_type_ids, labels=None)

    # argmax over the class dimension; .item() yields a Python int
    # (clearer than the old `str(*array)` star-unpack trick).
    return torch.argmax(output, dim=1).item()


with open(test_file, 'r', encoding='UTF-8') as input_data, \
        open(output_file, 'w', encoding='UTF-8') as output_data:
    json_content = json.load(input_data)
    total_records = len(json_content)
    print(Fore.BLUE + f"📂 载入{total_records}条测试数据")

    progress_bar = tqdm(
        total=total_records,
        bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
        desc=f"{Fore.YELLOW}🚀 预测进度"
    )

    inference_times = []
    start_time = time.time()

    for i, block in enumerate(json_content):
        record_start = time.time()

        query1 = block['query1']
        query2 = block['query2']

        # Predict and store the label as a string, matching the output schema.
        block['label'] = str(_predict_label(query1, query2))

        inference_time = time.time() - record_start
        inference_times.append(inference_time)

        progress_bar.set_postfix({
            "当前样本": f"{i + 1}/{total_records}",
            "推理时间": f"{inference_time:.3f}s",
            "Q1": f"{query1[:10]}...",
            "Q2": f"{query2[:10]}..."
        })
        progress_bar.update(1)

    progress_bar.close()

    # Performance summary. Guard the empty-test-file case: avoid
    # ZeroDivisionError and the RuntimeWarning from np.mean([]).
    total_time = time.time() - start_time
    avg_inference = float(np.mean(inference_times)) * 1000 if inference_times else 0.0
    records_per_sec = total_records / total_time if total_time > 0 else 0.0

    print(Fore.CYAN + "\n" + "=" * 70)
    print(Fore.GREEN + "✅ 预测完成! 性能报告:")
    print(Fore.CYAN + "-" * 70)
    print(Fore.BLUE + f"⏱️ 总耗时: {total_time:.2f} 秒")
    print(Fore.BLUE + f"📊 处理样本数: {total_records} 条")
    print(Fore.BLUE + f"⚡ 平均推理时间: {avg_inference:.2f} 毫秒/条")
    print(Fore.BLUE + f"🚀 处理速度: {records_per_sec:.2f} 条/秒")
    print(Fore.CYAN + "=" * 70)

    # Persist the predictions (records now carry their 'label' field)
    print(Fore.MAGENTA + f"💾 保存预测结果到: {output_file}")
    json.dump(json_content, output_data, indent=2, ensure_ascii=False)

print(Fore.YELLOW + "\n✨ 所有操作已完成! 程序退出")