# llm_enhancement/3_encode_profiles.py
import os
import json
import torch
import argparse
from sentence_transformers import SentenceTransformer


def load_ids_from_list_file(file_path, id_col='remap_id'):
    """Read every integer ID in column ``id_col`` from a whitespace-delimited list file.

    The first line is treated as a header. Rows that are too short or whose
    value in ``id_col`` is not an integer are skipped silently. On a missing
    file, a missing header column, or any other parse failure an error is
    printed and ``[]`` (or the IDs collected so far) is returned instead of
    raising.
    """
    collected = []
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            columns = handle.readline().strip().split()
            if id_col not in columns:
                print(f"\033[1;31m错误: 文件 {file_path} 表头中未找到 '{id_col}' 列。\033[0m")
                return []
            position = columns.index(id_col)
            for row in handle:
                fields = row.strip().split()
                # Skip short rows up front; non-integer values are skipped below.
                if len(fields) <= position:
                    continue
                try:
                    collected.append(int(fields[position]))
                except (ValueError, IndexError):
                    pass
    except FileNotFoundError:
        print(f"\033[1;31m错误: 文件未找到于 '{file_path}'\033[0m")
    except Exception as e:
        print(f"解析 {file_path} 时出错: {e}")
    return collected


def _encode_and_save_profiles(encoder, profiles_path, list_path, out_path, device, entity_label):
    """Encode one entity's text profiles into a full embedding table and save it.

    Loads the ``{id: text}`` mapping from ``profiles_path``, builds a
    ``(max_id + 1, dim)`` float32 tensor (rows without a profile stay zero),
    fills the profiled rows with sentence embeddings, and saves the tensor
    to ``out_path``. Missing or empty inputs are reported, not raised.

    Args:
        encoder: a loaded SentenceTransformer model.
        profiles_path: JSON file mapping string entity IDs to profile text.
        list_path: list file containing a 'remap_id' column with ALL entity IDs.
        out_path: destination for the saved torch tensor.
        device: device string forwarded to ``encoder.encode``.
        entity_label: human-readable label used in progress messages.
    """
    if not (os.path.exists(profiles_path) and os.path.getsize(profiles_path) > 2):
        print(f"警告: 未找到或空的 {profiles_path}。")
        return
    with open(profiles_path, 'r', encoding='utf-8') as f:
        profiles = json.load(f)
    all_ids = load_ids_from_list_file(list_path)
    if not all_ids:
        # ID list unavailable: skip this entity only; the caller may still
        # be able to encode the other entity.
        return

    # Table covers every known ID; entities without a profile keep zero rows.
    n_rows = max(all_ids) + 1
    dim = encoder.get_sentence_embedding_dimension()
    table = torch.zeros((n_rows, dim), dtype=torch.float32)

    print(f"开始编码 {len(profiles)} 个{entity_label}画像...")
    ids_to_encode = [int(eid) for eid in profiles.keys()]
    texts_to_encode = list(profiles.values())
    embeds = encoder.encode(texts_to_encode, show_progress_bar=True, convert_to_tensor=True,
                            device=device)
    table[ids_to_encode] = embeds.cpu()
    torch.save(table, out_path)
    print(f"\033[1;32m{entity_label}画像嵌入已保存到: {out_path}\033[0m")


def encode_profiles(data_name, data_dir, model_name='all-MiniLM-L6-v2', device='cuda'):
    """Step 3: encode user and item text profiles into embedding tensors.

    Reads ``user_profiles.json`` / ``item_profiles.json`` under
    ``data_dir/data_name``, encodes them with a SentenceTransformer model,
    and writes ``user_profiles.pt`` / ``item_profiles.pt`` alongside them.
    A failure for one entity (missing file, unreadable ID list) no longer
    aborts encoding of the other entity.

    Args:
        data_name: dataset folder name, e.g. 'amazon-book'.
        data_dir: root directory containing the dataset folder.
        model_name: SentenceTransformer model identifier.
        device: device string, e.g. 'cuda:0' or 'cpu'.
    """
    print("\n--- 步骤 3: 开始将文本画像编码为嵌入向量 ---")
    full_data_dir = os.path.join(data_dir, data_name)

    try:
        print(f"正在加载句子编码模型: {model_name}...")
        encoder = SentenceTransformer(model_name, device=device)
        print("模型加载成功。")
    except Exception as e:
        # Without the encoder nothing can be done; report and bail out.
        print(f"\033[1;31m错误: 加载SentenceTransformer模型失败: {e}\033[0m")
        return

    # --- Encode user profiles ---
    _encode_and_save_profiles(
        encoder,
        os.path.join(full_data_dir, 'user_profiles.json'),
        os.path.join(full_data_dir, 'user_list.txt'),
        os.path.join(full_data_dir, 'user_profiles.pt'),
        device,
        '用户',
    )

    # --- Encode item profiles ---
    _encode_and_save_profiles(
        encoder,
        os.path.join(full_data_dir, 'item_profiles.json'),
        os.path.join(full_data_dir, 'item_list.txt'),
        os.path.join(full_data_dir, 'item_profiles.pt'),
        device,
        '物品',
    )


if __name__ == '__main__':
    # CLI entry point: resolve the compute device from --gpu_id, then run
    # the encoding step. Unknown arguments are tolerated (parse_known_args)
    # so this script can share a command line with other pipeline stages.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', type=str, default='amazon-book')
    parser.add_argument('--data_dir', type=str, default='data/')
    parser.add_argument('--gpu_id', type=int, default=0)
    args, _ = parser.parse_known_args()

    # --gpu_id -1 forces CPU; otherwise use the requested GPU if CUDA exists.
    if args.gpu_id != -1 and torch.cuda.is_available():
        device = f'cuda:{args.gpu_id}'
    else:
        device = 'cpu'

    encode_profiles(data_name=args.data_name, data_dir=args.data_dir, device=device)