# llm_enhancement/2_generate_cs_kg.py
import os
import json
import time
import random
import itertools
import argparse
from tqdm import tqdm
from openai import OpenAI
from prompts import COMMONSENSE_RELATION_PROMPT

# Optionally load environment variables from a local .env file; if
# python-dotenv is not installed, skip silently so the script still runs.
try:
    from dotenv import load_dotenv

    load_dotenv()
except ImportError:
    pass

# Module-level OpenAI-compatible client pointed at the DeepSeek endpoint.
# Remains None when DEEPSEEK_API_KEY is unset or construction fails; the
# visible get_llm_response() below returns mock responses regardless.
api_key = os.environ.get('DEEPSEEK_API_KEY')
client = None
if api_key:
    try:
        client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
    except Exception as e:
        print(f"API 客户端初始化失败: {e}")

def get_llm_response(prompt, retries=3, delay=5):
    """Return a relation-classification response dict for *prompt*.

    NOTE(review): the real API call (up to *retries* attempts, *delay*
    seconds apart) is elided here — the visible implementation always
    delegates to the mock generator, so *retries*/*delay* are unused.
    """
    # ...
    return _get_mock_response(prompt)


def _get_mock_response(prompt):
    # ...
    return {"relationship": random.choice(["Complementary", "Substitute", "Irrelevant"]),
            "confidence": round(random.uniform(0.7, 0.95), 2), "justification": "This is a mock justification."}


def load_names_from_list_file(file_path, id_col='remap_id', name_col='org_id'):
    """Build an id -> name mapping from a KGAT-style whitespace list file.

    The first line is a header; *id_col* and *name_col* select the columns
    holding the integer id and the display name.  Rows that are too short
    or have a non-integer id are skipped.  Returns {} when the file is
    missing or the header lacks either requested column.
    """
    mapping = {}
    try:
        with open(file_path, 'r', encoding='utf-8') as fh:
            header = fh.readline().strip().split()
            if id_col not in header or name_col not in header:
                print(f"\033[1;31m错误: 文件 {file_path} 的表头中未找到 '{id_col}' 或 '{name_col}' 列。\033[0m")
                return {}
            id_pos = header.index(id_col)
            name_pos = header.index(name_col)
            min_width = max(id_pos, name_pos)
            for row in fh:
                fields = row.strip().split()
                if len(fields) <= min_width:
                    continue
                try:
                    mapping[int(fields[id_pos])] = fields[name_pos]
                except (ValueError, IndexError):
                    continue
    except FileNotFoundError:
        print(f"\033[1;31m错误: 文件未找到于 '{file_path}'\033[0m")
    except Exception as e:
        print(f"解析 {file_path} 时出错: {e}")
    return mapping


def generate_commonsense_kg(data_name, data_dir, sample_size=200):
    """Generate a commonsense KG over sampled item pairs via LLM prompts.

    Loads the id->name mapping from <data_dir>/<data_name>/item_list.txt,
    asks the LLM whether each sampled pair is Complementary or Substitute,
    and writes accepted triples (both directions, with confidence) to
    llm_enhanced_kg.txt in the same directory.  sample_size <= 0 means
    "use every item".
    """
    dataset_dir = os.path.join(data_dir, data_name)
    out_path = os.path.join(dataset_dir, 'llm_enhanced_kg.txt')

    names = load_names_from_list_file(os.path.join(dataset_dir, 'item_list.txt'))
    if not names:
        print("\033[1;31m错误: 未能从 item_list.txt 加载任何物品信息，程序终止。\033[0m")
        return

    all_ids = list(names.keys())
    if 0 < sample_size < len(all_ids):
        sampled = random.sample(all_ids, sample_size)
    else:
        sampled = all_ids

    print(f"将从 {len(sampled)} 个物品中生成物品对，进行常识关系推理。")
    pairs = list(itertools.combinations(sampled, 2))
    relation_ids = {"Complementary": 0, "Substitute": 1}

    with open(out_path, 'w', encoding='utf-8') as out:
        out.write("h\tr\tt\tconfidence\n")
        for a, b in tqdm(pairs, desc="生成常识KG"):
            title_a = names.get(a, f"item_{a}")
            title_b = names.get(b, f"item_{b}")
            prompt = COMMONSENSE_RELATION_PROMPT.format(item_1_title=title_a, item_2_title=title_b)
            verdict = get_llm_response(prompt)
            # "Irrelevant" (and malformed) responses produce no triples.
            if not verdict or verdict.get("relationship") not in relation_ids:
                continue
            rel = relation_ids[verdict["relationship"]]
            conf = verdict.get("confidence", 0.5)
            # Relations are treated as symmetric: emit both directions.
            out.write(f"{a}\t{rel}\t{b}\t{conf}\n")
            out.write(f"{b}\t{rel}\t{a}\t{conf}\n")

    print(f"\n\033[1;32m常识知识图谱已成功保存到: {out_path}\033[0m")


if __name__ == '__main__':
    # CLI entry point: dataset name, data root, and how many items to sample.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', type=str, default='amazon-book')
    parser.add_argument('--data_dir', type=str, default='data/')
    parser.add_argument('--sample_size', type=int, default=200)
    # parse_known_args tolerates extra flags passed by wrapper scripts.
    args, _unknown = parser.parse_known_args()
    generate_commonsense_kg(
        data_name=args.data_name,
        data_dir=args.data_dir,
        sample_size=args.sample_size,
    )