import os
import json
import time  # 新增这行导入语句
from conversation_manager import APIClientManager
from methods import entity_extraction_method, atomic_method, one_reflection_method, cot_method

# Point Hugging Face downloads at the mirror endpoint (useful behind
# restricted networks); must be set before any HF library is used.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

# --- API configuration ---
# SECURITY: prefer supplying the key via the NVIDIA_API_KEY environment
# variable. The hard-coded literal is kept only as a backward-compatible
# fallback and should be rotated/removed before sharing this file.
API_KEY = os.environ.get(
    "NVIDIA_API_KEY",
    "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",
)
MODEL_NAME = "meta/llama3-8b-instruct"
# Passed to APIClientManager below.
# NOTE(review): presumably the number of entities extracted per biography —
# confirm against APIClientManager's definition.
ENTI_NUM = 10

# --- File configuration ---
PERSONS_FILE = "/root/entity/api/prompt_entities/demo_prompt_entities.txt"  # one name per line
BIO_JSON = "bio_data.json"           # person -> generated biography
CONV_JSON = "entity10_history.json"  # person -> conversation history
# History-file naming convention used across runs:
#   entity_history.json   - entity-extraction method
#   xx_history.json       - other methods (e.g. b2_reflect_history.json)

"""封装类的模型加载"""
print("===== 初始化API对话管理器 =====")
manager = APIClientManager(
    api_key=API_KEY,
    model=MODEL_NAME,
    enti_num=ENTI_NUM
)


"""
人物传记保存
"""

# 读取人物列表
try:
    with open(PERSONS_FILE, "r", encoding="utf-8") as f:
        persons = [line.strip() for line in f if line.strip()]
    print(f"成功读取 {len(persons)} 个人物")
except Exception as e:
    print(f"读取人物文件失败: {e}")
    exit(1)

def _load_json_dict(path, label):
    """Load a JSON dict from *path*, returning {} when missing or corrupt.

    Args:
        path: JSON file expected to hold a dict keyed by person name.
        label: Human-readable name used in the progress messages.

    Returns:
        The parsed dict, or an empty dict when the file does not exist or
        cannot be parsed (a corrupt checkpoint should not abort the run).
    """
    if not os.path.exists(path):
        print(f"创建新的{label}文件")
        return {}
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:` so Ctrl-C and real bugs propagate.
        print(f"{label}文件损坏，创建新文件")
        return {}
    print(f"加载已有{label}，包含 {len(data)} 个人物")
    return data


# Checkpoint state: biographies and per-person conversation histories.
bio_data = _load_json_dict(BIO_JSON, "传记数据")
conv_data = _load_json_dict(CONV_JSON, "对话历史")

# Process each person: reuse or generate a biography, run the active
# verification method, and checkpoint both JSON files after every person so
# an interrupted run can resume where it left off.
for i, person in enumerate(persons):
    # Resume support: completion is judged by the conversation history (not
    # the biography), since the biography alone means the run was unfinished.
    if person in conv_data:
        print(f"\n[{i+1}/{len(persons)}] 跳过已处理人物: {person}")
        continue

    print(f"\n[{i+1}/{len(persons)}] 处理人物: {person}")

    try:
        # Reuse a cached biography when available; otherwise generate one.
        if person in bio_data:
            print(f"  使用现有传记")
            biography = bio_data[person]
        else:
            print("生成传记...")
            biography, _ = manager.generate_biography(person, 100)

            # Checkpoint immediately so a later crash keeps the biography.
            bio_data[person] = biography
            with open(BIO_JSON, "w", encoding="utf-8") as f:
                json.dump(bio_data, f, ensure_ascii=False, indent=2)
            print("传记数据已保存")

        # Active verification method.  To switch methods, replace this call
        # with exactly one of the alternatives:
        #   conv_history = atomic_method(manager, biography)
        #   conv_history = one_reflection_method(manager, biography)
        #   conv_history = cot_method(manager, person, 100)
        # NOTE: cot_method generates its own biography, so disable the
        # biography step above when using it.
        print("执行实体提取法...")
        conv_history = entity_extraction_method(manager, biography)

        # Checkpoint the conversation history after every person.
        conv_data[person] = conv_history
        with open(CONV_JSON, "w", encoding="utf-8") as f:
            json.dump(conv_data, f, ensure_ascii=False, indent=2)
        print("对话历史已保存")

        # Throttle to avoid API rate limits.
        time.sleep(1)

    except Exception as e:
        # Per-person failure isolation: report and continue with the next
        # person rather than aborting the whole batch.
        print(f"处理 {person} 时出错: {e}")
        # If the failure happened before the biography was stored, persist
        # the error text so reruns can see which persons failed and why.
        if person not in bio_data:
            bio_data[person] = f"ERROR: {str(e)}"
            with open(BIO_JSON, "w", encoding="utf-8") as f:
                json.dump(bio_data, f, ensure_ascii=False, indent=2)
# One last persistence pass after the whole batch finishes (the loop already
# checkpoints per person; this guarantees the final on-disk state).
print("\n所有人物处理完成，保存对话历史...")
payload = json.dumps(conv_data, ensure_ascii=False, indent=2)
with open(CONV_JSON, "w", encoding="utf-8") as f:
    f.write(payload)
print(f"对话历史已保存至 {CONV_JSON}，包含 {len(conv_data)} 个人物")

# Final summary of where everything landed.
print("\n处理完成！")
print(f"传记数据保存至: {BIO_JSON}")
print(f"对话历史保存至: {CONV_JSON}")


"""生成待测传记"""
"""
SUBJECT = '张爱玲'
BIOGRAPHY_LENGTH = 100
OUTPUT_FILE = "biography_fact_check.json"
"""

#运行cot的时候需要关闭传记
# biography, _ = manager.generate_biography(SUBJECT, BIOGRAPHY_LENGTH)

'''
# 上面try的里面有
"""运行实体提取法"""
print("===== 运行实体提取法 =====")
entity_results = entity_extraction_method(manager, biography)
'''


'''
"""运行原子方法"""
print("===== 运行原子方法 =====")
entity_results = atomic_method(manager, biography)
'''

'''
"""运行一次反思法"""
print ("===== 运行一次反思法 =====")
reflection_results = one_reflection_method (manager, biography)
'''

'''
"""运行思维链方法"""
print ("===== 运行思维链方法 =====")
cot_results = cot_method (manager, SUBJECT, BIOGRAPHY_LENGTH) # 假设传入元组参数，包含 manager 和 biography，可根据实际参数需求调整
# biography, _ = manager.generate_biography(SUBJECT, BIOGRAPHY_LENGTH)
'''

# 保存完整对话历史
manager.save_conversation()

# 由于API不返回token概率，无法进行LURE概率处理
print("API调用不返回token概率，无法进行LURE分析")


# ---------------------------------------------------------------------------
# Factuality evaluation pass over the saved conversation histories.
# ---------------------------------------------------------------------------
print("\n" + "="*50)
print("开始事实性评估...")
print("="*50)

try:
    # Imported lazily so a missing fact_checker module only skips this pass.
    from fact_checker import FactChecker

    checker = FactChecker()

    print("加载对话历史数据...")
    conv_data = checker.load_data(CONV_JSON)
    if conv_data:
        # Evaluate every biography and write results next to the history file.
        print("开始评估传记...")
        stem = os.path.basename(CONV_JSON).replace('.json', '')
        output_file = f"fact_check_{stem}.json"
        checker.process_all_biographies(conv_data, output_file)
        checker.print_summary()
    else:
        print("无法加载对话历史数据，跳过评估")

except Exception as e:
    # Top-level boundary: report with traceback but never crash the script.
    print(f"事实性评估过程中出错: {e}")
    import traceback
    traceback.print_exc()

print("\n所有处理完成！")