#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import json
import logging
import sys
import numpy as np
from scipy.spatial.distance import cosine

# 导入text2vec库
from text2vec import SentenceModel

# Configure module-wide logging: INFO level, timestamped single-line format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def add_sentencebert_to_json(
    input_file,
    output_file=None,
    model_path="/mnt/ssd/jsj/models/models/Jerry0/text2vec-large-chinese",
):
    """
    Read a JSON results file, compute a Chinese Sentence-BERT (text2vec)
    cosine similarity for every nested result record, and write the updated
    structure back to disk.

    The file is expected to be a list of experiment dicts, each carrying a
    ``results`` array whose records contain ``reference`` and ``hypothesis``
    strings. Scores are stored under
    ``result['text_metrics']['chinese_sentence_bert_similarity']`` and the
    average over all scored records is written into every experiment's
    ``average_metrics``.

    Args:
        input_file: Path to the input JSON file.
        output_file: Path for the updated JSON file; overwrites the input
            file when None.
        model_path: Local path of the text2vec SentenceModel to load
            (defaults to the historical hard-coded path).

    Returns:
        None. Errors are logged and cause an early return, matching the
        original best-effort behavior.
    """
    # Overwrite the input file when no explicit output path is given.
    if output_file is None:
        output_file = input_file

    logger.info(f"开始处理文件: {input_file}")

    if not os.path.exists(input_file):
        logger.error(f"输入文件不存在: {input_file}")
        return

    # Load the JSON payload; a parse failure or I/O error aborts the run.
    try:
        with open(input_file, 'r', encoding='utf-8') as f:
            logger.info("正在读取JSON文件...")
            results = json.load(f)
            logger.info(f"成功读取JSON文件，包含 {len(results)} 条记录")
    except json.JSONDecodeError as e:
        logger.error(f"解析JSON文件失败: {str(e)}")
        return
    except Exception as e:
        logger.error(f"读取JSON文件时发生错误: {str(e)}")
        return

    # Load the text2vec sentence-embedding model from the local path.
    try:
        logger.info(f"正在加载text2vec模型: {model_path}")
        model = SentenceModel(model_path)
        logger.info("text2vec模型加载成功")
    except Exception as e:
        logger.error(f"加载text2vec模型失败: {str(e)}")
        return

    total_count = sum(
        len(experiment['results']) for experiment in results if 'results' in experiment
    )
    logger.info(f"总共发现 {total_count} 条需要处理的记录")

    processed_count = 0
    error_count = 0
    sentencebert_scores = []
    current_record_index = 0

    for exp_idx, experiment in enumerate(results):
        try:
            if 'results' not in experiment:
                logger.warning(f"实验记录 {exp_idx} 缺少results数组，跳过")
                continue

            for i, result in enumerate(experiment['results']):
                try:
                    current_record_index += 1

                    # Both text fields are required to score a record.
                    if 'reference' not in result or 'hypothesis' not in result:
                        logger.warning(f"记录 {current_record_index}/{total_count} 缺少必要的reference或hypothesis字段，跳过")
                        error_count += 1
                        continue

                    # Reuse a previously computed score instead of re-encoding;
                    # it still contributes to the average.
                    metrics = result.get('text_metrics', {})
                    score = metrics.get('chinese_sentence_bert_similarity')
                    if score is not None:
                        logger.info(f"记录 {current_record_index}/{total_count} 已经包含chinese_sentence_bert_similarity，跳过")
                        sentencebert_scores.append(score)
                        processed_count += 1
                        continue

                    reference = result['reference']
                    hypothesis = result['hypothesis']

                    logger.info(f"处理记录 {current_record_index}/{total_count}: 计算中文text2vec相似度")

                    # Encode both sentences in one batch and score them with
                    # cosine similarity (1 - cosine distance). float() converts
                    # numpy scalars (and 0-d tensors) to a JSON-serializable
                    # native float in one step.
                    embeddings = model.encode([reference, hypothesis])
                    similarity = float(1 - cosine(embeddings[0], embeddings[1]))

                    result.setdefault('text_metrics', {})[
                        'chinese_sentence_bert_similarity'
                    ] = similarity
                    sentencebert_scores.append(similarity)
                    processed_count += 1

                    # Progress heartbeat every 10 records.
                    if current_record_index % 10 == 0:
                        logger.info(f"已处理 {current_record_index}/{total_count} 条记录")

                except Exception as e:
                    logger.error(f"处理记录 {current_record_index}/{total_count} 时发生错误: {str(e)}")
                    error_count += 1
        except Exception as e:
            logger.error(f"处理实验记录 {exp_idx} 时发生错误: {str(e)}")
            error_count += 1

    # Write the average score into every experiment's average_metrics so the
    # summary travels with each experiment entry.
    if sentencebert_scores:
        avg_sentencebert = sum(sentencebert_scores) / len(sentencebert_scores)
        logger.info(f"计算所有记录的中文Sentence-BERT相似度平均值: {avg_sentencebert}")

        for experiment in results:
            experiment.setdefault('average_metrics', {})[
                'text_metrics.chinese_sentence_bert_similarity'
            ] = avg_sentencebert
    else:
        logger.warning("没有找到有效的中文Sentence-BERT相似度分数，无法计算平均值")

    # Persist the enriched structure; keep non-ASCII text readable.
    try:
        with open(output_file, 'w', encoding='utf-8') as f:
            logger.info("正在保存更新后的JSON文件...")
            json.dump(results, f, ensure_ascii=False, indent=2)
        logger.info(f"文件保存成功: {output_file}")
        logger.info(f"处理完成: 成功处理 {processed_count} 条记录，失败 {error_count} 条记录")
    except Exception as e:
        logger.error(f"保存JSON文件时发生错误: {str(e)}")
        return

def main():
    """Entry point: compute similarities, then atomically replace the input file.

    The input path may be given as the first command-line argument; with no
    argument the historical default path is used. Results are first written
    to a ``.temp`` sibling file; only when that file exists and is non-empty
    is the original backed up (``.backup``) and replaced, so a failed run
    never clobbers the source data.
    """
    default_input = "/mnt/ssd/jsj/patient/results/Matrix/experiment_results_Qwen2.5-72B-Instruct_window_full.json"
    # Allow overriding the hard-coded path from the command line.
    input_file = sys.argv[1] if len(sys.argv) > 1 else default_input

    # Write to a temporary file first for safety.
    temp_output = input_file + ".temp"

    try:
        add_sentencebert_to_json(input_file, temp_output)

        # Replace the original only if the temp file was actually produced.
        if os.path.exists(temp_output) and os.path.getsize(temp_output) > 0:
            backup_file = input_file + ".backup"
            if os.path.exists(input_file):
                # os.replace overwrites an existing target on every platform,
                # unlike os.rename which raises on Windows.
                os.replace(input_file, backup_file)
                logger.info(f"已备份原文件到: {backup_file}")

            os.replace(temp_output, input_file)
            logger.info(f"已将临时文件重命名为原文件: {input_file}")
        else:
            logger.error("临时文件生成失败或为空，不执行替换操作")
    except Exception as e:
        logger.error(f"执行过程中发生错误: {str(e)}")
        # Best-effort cleanup of the partial temp file.
        if os.path.exists(temp_output):
            os.remove(temp_output)
            logger.info("已清理临时文件")

if __name__ == "__main__":
    main()