# ====================================================================================
# 文件: preprocessing/step1_CIKG_user_enhance.py
# 描述: [V5 修复版]
#      (修复) parse_llm_args 现在返回剩余参数 (remaining_args)。
#      (修复) main 函数将 remaining_args 传递给 get_pre_config()
#      以避免 "未知参数" 警告。
# ====================================================================================

import os
import random
import json
import jsonlines
import pandas as pd
import numpy as np
from tqdm import tqdm
import aiohttp
import asyncio
import torch
import warnings
import time
import argparse
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

# 导入共享的解析器和工具函数
from base_pre_parser import get_pre_config
from pre_utils import get_system_generate, get_prompt_generate, clean_text

warnings.filterwarnings("ignore", category=FutureWarning, module='sklearn.cluster._kmeans')


# =========================================================================
# 1. vLLM 运行时参数解析器
# =========================================================================
def parse_llm_args(argv=None):
    """Parse the vLLM runtime options, returning (known args, leftover args).

    The leftover tokens (e.g. ``--config ...``) are meant to be forwarded to
    ``get_pre_config()`` so that shared configuration flags do not trigger
    "unknown argument" warnings here.

    Args:
        argv: Optional explicit argument list (mainly for testing);
            ``None`` means the standard ``sys.argv[1:]``.

    Returns:
        tuple[argparse.Namespace, list[str]]: the parsed vLLM arguments and
        the list of unrecognized tokens, in original order.
    """
    parser = argparse.ArgumentParser(description="SDKR Step1: vLLM Runtime Arguments")
    parser.add_argument('--vllm-host', type=str, required=True, help="vLLM 服务的主机地址")
    parser.add_argument('--vllm-model-name', type=str, required=True, help="vLLM 加载的模型名称")
    parser.add_argument('--vllm-concurrency', type=int, required=True, help="vLLM 并发请求数")

    # Parse only the known vLLM flags; everything else is returned untouched
    # for the downstream config parser.
    args, unknown_args = parser.parse_known_args(argv)
    return args, unknown_args


# =========================================================================
# 2. 异步 LLM API 请求
# =========================================================================
# ( ... 此部分无变化 ... )
async def fetch(session, uid, payload, endpoint_url):
    """POST one chat-completion request for a single user and normalize the outcome.

    Returns a dict always carrying ``custom_id`` (the stringified uid); on
    success it holds ``response_content`` with the model's text, otherwise an
    ``error`` string describing the HTTP status, timeout, or exception.
    """
    try:
        # Hard cap of 300s on the whole request (connect + read).
        request_timeout = aiohttp.ClientTimeout(total=300)
        async with session.post(endpoint_url, json=payload, timeout=request_timeout) as response:
            if response.status != 200:
                body = await response.text()
                # 404 usually means a wrong endpoint/model name — surface it loudly.
                if response.status == 404:
                    print(f"\n[fetch Error 404] 针对 {uid} 的请求失败: {body}")
                return {"custom_id": str(uid), "error": f"HTTP Status {response.status}: {body}"}
            data = await response.json()
            # OpenAI-compatible schema: choices[0].message.content; default to "".
            first_choice = data.get("choices", [{}])[0]
            content = first_choice.get("message", {}).get("content", "")
            return {"custom_id": str(uid), "response_content": content}
    except asyncio.TimeoutError:
        return {"custom_id": str(uid), "error": "Request timed out after 300s"}
    except Exception as exc:
        return {"custom_id": str(uid), "error": str(exc)}


async def process_user_batches(user_his_dict, endpoint_url, model_path, field, concurrency_limit):
    """Fan out one vLLM chat request per user, bounded by a semaphore.

    Builds a system+user prompt pair per user history, fires all requests
    concurrently (at most ``concurrency_limit`` in flight), and returns the
    list of per-user result dicts produced by ``fetch``.
    """
    semaphore = asyncio.Semaphore(concurrency_limit)
    system_prompt = get_system_generate(field)

    async with aiohttp.ClientSession() as session:
        progress = tqdm(total=len(user_his_dict), desc="[Step 1.2] 提交 vLLM 请求")

        async def throttled_request(uid, payload):
            # Limit in-flight requests; tick the bar as each one completes.
            async with semaphore:
                outcome = await fetch(session, uid, payload, endpoint_url)
                progress.update(1)
                return outcome

        pending = []
        for uid, history in user_his_dict.items():
            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": get_prompt_generate(history, field)},
            ]
            request_body = {"model": model_path, "messages": messages, "max_tokens": 100, "temperature": 0.0}
            pending.append(throttled_request(uid, request_body))

        results = await asyncio.gather(*pending)
        progress.close()
        return results


# =========================================================================
# 3. 兴趣聚类
# =========================================================================
# ( ... 此部分无变化 ... )
def cluster_interests(llm_output_path, cluster_num, cluster_type, filter_min, filter_max_ratio):
    """Cluster the LLM-extracted interest phrases into user-interest groups.

    Reads the jsonl output of step 1.2, splits each answer into interest
    phrases, embeds the unique phrases (SBERT when ``cluster_type == 'st'``,
    TF-IDF otherwise), runs K-Means, then drops overly-generic and noise
    clusters.

    Args:
        llm_output_path: jsonl file with one record per user, carrying either
            'response_content' (comma-separated phrases) or 'error'.
        cluster_num: requested K for K-Means; auto-capped at the number of
            unique interest phrases.
        cluster_type: 'st' for sentence-transformer embeddings, anything
            else falls back to TF-IDF.
        filter_min: clusters with <= this many (uid, interest) edges are
            dropped as noise.
        filter_max_ratio: clusters with >= user_num * ratio edges are
            dropped as too generic.

    Returns:
        pandas.DataFrame with columns ['uid', 'cluster_id'] of unique edges,
        or None when nothing valid survives parsing/filtering.
    """
    print(f"[Step 1.3] 开始聚类。类型: {cluster_type}, 簇数: {cluster_num}")

    intent_triples = []   # [uid, interest] rows, one per extracted phrase
    intents_list = []     # flat list of all phrases (duplicates kept)
    waste_count = 0       # rows dropped for errors / empty / invalid content

    try:
        with open(llm_output_path, mode='r', encoding='utf-8') as f:
            for answer in jsonlines.Reader(f):
                row_id = answer.get('custom_id', 'UNKNOWN_ID')
                if "error" in answer:
                    waste_count += 1
                    continue

                # [Fix] A missing / non-numeric custom_id previously survived
                # until the astype(int) below and crashed the whole step with
                # a ValueError; discard such rows up front instead.
                if not str(row_id).lstrip('-').isdigit():
                    waste_count += 1
                    continue

                raw_intents = answer.get("response_content", "")
                if not raw_intents:
                    waste_count += 1
                    continue

                # The LLM answers with a comma-separated phrase list; keep
                # cleaned phrases of 2..50 characters.
                clean_intents = [clean_text(it) for it in raw_intents.strip().split(',')]
                valid_intents = [it for it in clean_intents if it and 1 < len(it) <= 50]

                if not valid_intents:
                    waste_count += 1
                    continue

                for it in valid_intents:
                    intent_triples.append([row_id, it])
                intents_list.extend(valid_intents)

    except FileNotFoundError:
        print(f"错误: LLM 临时输出文件未找到: {llm_output_path}")
        raise

    print(f"  > LLM 原始输出行处理完毕。有效兴趣: {len(intents_list)}, 丢弃/错误行: {waste_count}")
    if not intents_list:
        print("错误: 没有从 LLM 输出中提取到有效兴趣点。")
        return None

    unique_intents_list = list(set(intents_list))
    print(f"  > 发现 {len(unique_intents_list)} 个独立兴趣点。开始编码...")

    if cluster_type == 'st':
        # Local import keeps sentence-transformers optional for tfidf runs.
        from pre_utils import get_sbert_embeddings
        device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
        print(f"  > [ST] 使用设备: {device}")

        embeddings = get_sbert_embeddings(unique_intents_list, 'sentence-transformers/all-MiniLM-L6-v2', device)

    else:  # tfidf
        print("  > [TF-IDF] 使用 TF-IDF 编码器...")
        vectorizer = TfidfVectorizer(max_df=0.8, ngram_range=(1, 2), stop_words='english')
        embeddings = vectorizer.fit_transform(unique_intents_list)

    print(f"  > 编码完成。开始 K-Means 聚类 (K={cluster_num})...")
    if cluster_num > len(unique_intents_list):
        # K-Means requires K <= number of samples; cap and report.
        print(f"  > 警告: 簇数 {cluster_num} 大于独立兴趣点数 {len(unique_intents_list)}。")
        cluster_num = len(unique_intents_list)
        print(f"  > 自动调整簇数为: {cluster_num}")

    kmeans = KMeans(n_clusters=cluster_num, n_init=10, random_state=2024)
    cluster_ids = kmeans.fit_predict(embeddings)

    # Map each unique phrase to its cluster, then project onto the edges.
    interest_to_cluster_map = dict(zip(unique_intents_list, cluster_ids))

    intent_triples_df = pd.DataFrame(intent_triples, columns=['uid', 'interest'])
    intent_triples_df['cluster_id'] = intent_triples_df['interest'].map(interest_to_cluster_map)

    intent_triples_df['uid'] = intent_triples_df['uid'].astype(int)
    intent_triples_df = intent_triples_df.dropna(subset=['cluster_id'])
    intent_triples_df['cluster_id'] = intent_triples_df['cluster_id'].astype(int)

    user_num = len(intent_triples_df['uid'].unique())
    print(f'  > 聚类完成。过滤前: {len(intent_triples_df)} 条边, {user_num} 个用户。')

    # NOTE(review): this counts edges per cluster (duplicate uids included),
    # not distinct users — the thresholds below are therefore edge counts.
    cluster_stats = intent_triples_df.groupby('cluster_id')['uid'].count()

    del_list_max_count = int(user_num * filter_max_ratio)
    del_clusters_max = set(cluster_stats[cluster_stats >= del_list_max_count].index)

    del_clusters_min = set(cluster_stats[cluster_stats <= filter_min].index)

    print(f"  > 移除 {len(del_clusters_max)} 个通用簇 (>= {del_list_max_count} 个用户)")
    print(f"  > 移除 {len(del_clusters_min)} 个噪音簇 (<= {filter_min} 个用户)")

    del_list = del_clusters_max.union(del_clusters_min)
    intent_triples_df_filter = intent_triples_df[~intent_triples_df['cluster_id'].isin(del_list)]

    # Deduplicate so each (user, interest cluster) edge appears once.
    kg_intent = intent_triples_df_filter[['uid', 'cluster_id']].drop_duplicates()

    print(f'  > 过滤后: {len(kg_intent)} 条唯一的 (用户, 兴趣簇) 边。')

    if len(kg_intent) == 0:
        print("错误: 过滤移除了所有兴趣边。请尝试调整 YAML 中的 'min_cluster_size' 或 'max_cluster_ratio'。")
        return None

    return kg_intent


# =========================================================================
# 4. 主函数
# =========================================================================

def main():
    """Step 1 entry point: CIKGRec user-interest enhancement.

    Pipeline:
      1.1 load per-user interaction history and item titles;
      1.2 query the vLLM service for each user's interest phrases;
      1.3 cluster the phrases into interest groups;
      1.4 offset cluster ids past the existing KG entity-id space and save
          the (uid, cluster_id) edge list.
    """
    print("--- [Step 1] CIKGRec: 用户兴趣增强 ---")

    # Parse the vLLM runtime flags first, then forward the remaining tokens
    # (e.g. --config ...) to get_pre_config() so it sees no unknown args.
    llm_args, remaining_args = parse_llm_args()
    config = get_pre_config(remaining_args)

    conf_data = config.data_config
    conf_pre = config.preproc_config.user_enhance

    DATA_DIR = config.DATA_DIR
    PROJECT_ROOT = config.PROJECT_ROOT

    # --- 1.1 Load required data ---
    print("[Step 1.1] 加载用户交互历史和物品元数据...")
    item_text_file = os.path.join(DATA_DIR, conf_data.item_meta_file)

    if not os.path.exists(item_text_file):
        print(f"错误: 物品元数据文件未找到: {item_text_file}")
        return

    # Item metadata is a JSON object: {item_id (string): title/text}.
    item_id_to_name = {}
    with open(item_text_file, 'r') as f:
        item_meta = json.load(f)
        for item_id, text in item_meta.items():
            item_id_to_name[int(item_id)] = text

    if not item_id_to_name:
        print(f"错误: 未能从 {item_text_file} 加载任何物品标题。")
        return
    print(f"  > 加载了 {len(item_id_to_name)} 个物品标题。")

    train_file = os.path.join(DATA_DIR, conf_data.train_file)
    if not os.path.exists(train_file):
        print(f"错误: 训练文件未找到: {train_file}")
        return

    # Train file format: "<uid> <item1> <item2> ..." per line.
    user_his_items = {}
    with open(train_file) as f:
        for line in f:
            if len(line) > 0:
                parts = line.strip('\n').split(' ')
                if len(parts) > 1:
                    uid = int(parts[0])
                    items = [int(i) for i in parts[1:]]
                    user_his_items.setdefault(uid, []).extend(items)

    user_his_names = {}
    max_his_num = conf_pre.llm_prompt_max_history
    field_type = 'book' if 'book' in conf_data.dataset else 'item'

    for uid, items in user_his_items.items():
        names = [item_id_to_name.get(i) for i in items if i in item_id_to_name]
        if not names:
            continue

        # Cap the prompt size by sampling when the history is too long.
        if len(names) > max_his_num:
            user_his_names[uid] = random.sample(names, max_his_num)
        else:
            user_his_names[uid] = names

    if not user_his_names:
        print("错误: 未能构建任何用户的历史记录 (物品名称匹配失败)。")
        return

    print(f"  > 构建了 {len(user_his_names)} 个用户的文本历史记录。")

    # --- 1.2 Call the vLLM API ---
    vllm_endpoint = f"{llm_args.vllm_host.strip('/')}/v1/chat/completions"
    llm_output_file = os.path.join(PROJECT_ROOT, "batch_output", f"{conf_data.dataset}_vllm_output.jsonl")

    os.makedirs(os.path.dirname(llm_output_file), exist_ok=True)
    start_time = time.time()

    # [Fix] This used a bare `except:` (which also swallows KeyboardInterrupt
    # / SystemExit); catch only the two failure modes this path produces:
    # RuntimeError (no usable event loop on this thread) and ImportError
    # (nest_asyncio not installed).
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # Already inside a running loop (e.g. Jupyter): allow re-entry.
            import nest_asyncio
            nest_asyncio.apply()
            loop = asyncio.get_event_loop()
    except (RuntimeError, ImportError):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    all_results = loop.run_until_complete(process_user_batches(
        user_his_names,
        vllm_endpoint,
        llm_args.vllm_model_name,
        field_type,
        llm_args.vllm_concurrency
    ))

    print(f"  > vLLM 处理完成，耗时: {time.time() - start_time:.2f} 秒。")

    # Persist every result (errors included) so failures stay inspectable.
    success_count = 0
    with jsonlines.open(llm_output_file, mode='w') as writer:
        for res in all_results:
            writer.write(res)
            if "response_content" in res:
                success_count += 1

    print(f"  > 成功保存 {success_count} 条 vLLM 结果到: {llm_output_file}")
    if success_count == 0:
        print("错误: vLLM 未返回任何成功结果。请检查 vLLM 服务和配置。")
        return

    # --- 1.3 Clustering ---
    kg_intent = cluster_interests(
        llm_output_file,
        conf_pre.cluster_num,
        conf_pre.cluster_type,
        conf_pre.min_cluster_size,
        conf_pre.max_cluster_ratio
    )

    if kg_intent is None:
        print("错误: 聚类失败。")
        return

    # --- 1.4 ID remapping and saving ---
    kg_file = os.path.join(DATA_DIR, conf_data.kg_file)

    if not os.path.exists(kg_file):
        print(f"错误: KG 文件未找到: {kg_file}。无法计算最大实体 ID。")
        return

    print("[Step 1.4] 计算最大实体 ID...")
    # KG file format: "<head> <relation> <tail>" integer triples per line.
    max_entity_id = -1
    with open(kg_file, 'r') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) == 3:
                h, r, t = map(int, parts)
                max_entity_id = max(max_entity_id, h, t)

    # Item ids share the same id space as KG entities, so include them.
    max_entity_id = max(max_entity_id, max(item_id_to_name.keys()))

    if max_entity_id == -1:
        print("错误: 未能从 KG 文件中读取任何实体。")
        return

    # Interest-cluster ids must not collide with existing entity/item ids.
    cluster_id_offset = max_entity_id + 1
    print(f"  > 最大实体/物品 ID: {max_entity_id}")
    print(f"  > 兴趣簇 ID 将从 {cluster_id_offset} 开始。")

    kg_intent['cluster_id'] = kg_intent['cluster_id'] + cluster_id_offset

    final_output_path = os.path.join(DATA_DIR, conf_pre.output_file)

    # Space-separated "uid cluster_id" rows, no header, no index column.
    kg_intent.to_csv(final_output_path, sep=' ', index=False, header=None)

    print(f"\n✅ [Step 1] 成功完成!")
    print(f"最终用户兴趣图文件已保存到: {final_output_path}")


if __name__ == "__main__":
    main()