# llm_enhancement/1_generate_profiles.py
import os
import json
import torch
import time
import random
import argparse
import collections
from tqdm import tqdm
from openai import OpenAI
from prompts import ITEM_PROFILE_PROMPT, USER_PROFILE_PROMPT

# Best-effort .env loading: python-dotenv is optional, so an absent
# package is silently ignored and plain environment variables are used.
try:
    from dotenv import load_dotenv

    load_dotenv()
except ImportError:
    pass

# --- API client initialization (module-level side effect) ---
# When DEEPSEEK_API_KEY is set, build an OpenAI-compatible client pointed
# at the DeepSeek endpoint; otherwise `client` stays None and
# get_llm_response() transparently falls back to mock responses.
api_key = os.environ.get('DEEPSEEK_API_KEY')
client = None
if api_key:
    try:
        client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
        print("\033[1;32mDeepSeek API 客户端初始化成功。\033[0m")
    except Exception as e:
        print(f"\033[1;31mDeepSeek API 客户端初始化失败: {e}\033[0m")
else:
    print("\033[1;33m警告: DEEPSEEK_API_KEY 未设置，将使用模拟响应。\033[0m")

def get_llm_response(prompt, retries=3, delay=5):
    """Send `prompt` to the DeepSeek chat model and return the parsed JSON reply.

    Falls back to a canned mock response when no API client is configured,
    or after `retries` consecutive failures (waiting `delay` seconds between
    attempts).
    """
    if not client:
        return _get_mock_response(prompt)

    attempt = 0
    while attempt < retries:
        try:
            completion = client.chat.completions.create(
                model="deepseek-chat",
                messages=[{"role": "user", "content": prompt}],
                stream=False,
                temperature=0.1,
                response_format={"type": "json_object"},
            )
            return json.loads(completion.choices[0].message.content)
        except Exception as e:
            print(f"LLM API 调用失败: {e}。重试中...")
            time.sleep(delay)
        attempt += 1

    # Every attempt failed; degrade gracefully to the mock response.
    return _get_mock_response(prompt)


def _get_mock_response(prompt):
    if "item metadata" in prompt.lower():
        return {"summarization": "模拟物品画像", "reasoning": "模拟响应"}
    elif "Item Profiles" in prompt:
        return {"summarization": "模拟用户画像", "reasoning": "模拟响应"}
    return None


def load_names_from_list_file(file_path, id_col='remap_id', name_col='org_id'):
    """Load a remap-id -> original-id mapping from a KGAT-style list file.

    The header row is inspected to locate `id_col` and `name_col`, so the
    physical column order in the file does not matter. Malformed data rows
    are skipped; a missing file or missing columns yields an empty dict.
    """
    mapping = {}
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            columns = handle.readline().strip().split()

            if id_col not in columns or name_col not in columns:
                print(f"\033[1;31m错误: 文件 {file_path} 的表头中未找到 '{id_col}' 或 '{name_col}' 列。\033[0m")
                return {}

            id_pos = columns.index(id_col)
            name_pos = columns.index(name_col)
            min_fields = max(id_pos, name_pos)

            for row in handle:
                fields = row.strip().split()
                if len(fields) <= min_fields:
                    continue
                try:
                    # The name value (org_id) is kept verbatim as a string.
                    mapping[int(fields[id_pos])] = fields[name_pos]
                except (ValueError, IndexError):
                    # Skip rows whose ID column is not an integer.
                    continue
    except FileNotFoundError:
        print(f"\033[1;31m错误: 文件未找到于 '{file_path}'\033[0m")
    except Exception as e:
        print(f"解析 {file_path} 时出错: {e}")
    return mapping


def _sample_ids(ids, sample_size):
    """Return a random subset of `ids` of size `sample_size`.

    A non-positive `sample_size`, or one at least as large as `ids`,
    means "process everything".
    """
    if sample_size > 0 and len(ids) > sample_size:
        return random.sample(ids, sample_size)
    return ids


def _load_interactions(train_file):
    """Parse a KGAT-style train.txt into {user_id: [item_id, ...]}.

    Each line is `user item item ...`; lines without items are skipped.
    Returns None when the file does not exist so the caller can abort.
    """
    user_dict = collections.defaultdict(list)
    try:
        # Consistent with item_list.txt reading: decode explicitly as UTF-8.
        with open(train_file, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) > 1:
                    user_dict[int(parts[0])].extend(int(i) for i in parts[1:])
    except FileNotFoundError:
        return None
    return user_dict


def generate_profiles(data_name, data_dir, sample_size=100):
    """Generate and persist LLM item/user profiles for one dataset.

    Args:
        data_name: dataset folder name under `data_dir` (e.g. 'amazon-book').
        data_dir: root directory containing the dataset folders.
        sample_size: max number of items/users to profile; <= 0 profiles all.

    Side effects: writes item_profiles.json and user_profiles.json into the
    dataset folder and prints progress to stdout. Returns None.
    """
    full_data_dir = os.path.join(data_dir, data_name)
    os.makedirs(full_data_dir, exist_ok=True)

    item_id_to_name = load_names_from_list_file(os.path.join(full_data_dir, 'item_list.txt'))
    if not item_id_to_name:
        print("\033[1;31m错误: 未能从 item_list.txt 加载任何物品信息，程序终止。\033[0m")
        return

    print("\n--- 步骤 1: 生成物品画像 ---")
    item_ids_to_process = _sample_ids(list(item_id_to_name.keys()), sample_size)

    item_profiles = {}
    for item_id in tqdm(item_ids_to_process, desc="生成物品画像"):
        item_name = item_id_to_name.get(item_id, f"item_{item_id}")
        prompt = ITEM_PROFILE_PROMPT.format(metadata_json=json.dumps({"title": item_name}, ensure_ascii=False))
        response_json = get_llm_response(prompt)
        if response_json and "summarization" in response_json:
            # Keys are stringified for stable JSON serialization.
            item_profiles[str(item_id)] = response_json["summarization"]

    print("\n--- 步骤 2: 生成用户画像 ---")
    train_file = os.path.join(full_data_dir, 'train.txt')
    user_dict = _load_interactions(train_file)
    if user_dict is None:
        print(f"\033[1;31m错误: 训练文件 {train_file} 未找到。\033[0m")
        return

    user_ids_to_process = _sample_ids(list(user_dict.keys()), sample_size)

    user_profiles = {}
    for user_id in tqdm(user_ids_to_process, desc="生成用户画像"):
        # Only items that actually received a profile in step 1 can feed the
        # user prompt (a sampled run covers just a subset of items).
        liked_item_profiles = [item_profiles[str(item_id)]
                               for item_id in user_dict[user_id]
                               if str(item_id) in item_profiles]
        if not liked_item_profiles:
            continue
        # Cap at 5 item profiles to keep the prompt compact.
        prompt = USER_PROFILE_PROMPT.format(item_profiles_str="\n- ".join(liked_item_profiles[:5]))
        response_json = get_llm_response(prompt)
        if response_json and "summarization" in response_json:
            user_profiles[str(user_id)] = response_json["summarization"]

    item_profile_path = os.path.join(full_data_dir, 'item_profiles.json')
    user_profile_path = os.path.join(full_data_dir, 'user_profiles.json')
    with open(item_profile_path, 'w', encoding='utf-8') as f:
        json.dump(item_profiles, f, ensure_ascii=False, indent=2)
    with open(user_profile_path, 'w', encoding='utf-8') as f:
        json.dump(user_profiles, f, ensure_ascii=False, indent=2)

    print("\n\033[1;32m处理完成！\033[0m")
    print(f"  - 生成了 {len(item_profiles)} 个物品画像，已保存到: {item_profile_path}")
    print(f"  - 生成了 {len(user_profiles)} 个用户画像，已保存到: {user_profile_path}")


if __name__ == '__main__':
    # CLI entry point; parse_known_args tolerates unrecognized extra flags.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_name', type=str, default='amazon-book')
    arg_parser.add_argument('--data_dir', type=str, default='data/')
    arg_parser.add_argument('--sample_size', type=int, default=200)
    parsed, _unknown = arg_parser.parse_known_args()
    generate_profiles(
        data_name=parsed.data_name,
        data_dir=parsed.data_dir,
        sample_size=parsed.sample_size,
    )