import json
import os
import faiss
import numpy as np
import torch
import gc
from torch import device
from src.utils.ai_tool import (
    llm_generate_batch_summaries,
    llm_generate_batch_summaries_keyword,
    llm_generate_recall_pipelines_professional,
    llm_generate_recall_pipelines_caring
)
from sentence_transformers import SentenceTransformer
import time
import difflib
from typing import List, Dict
from collections import OrderedDict
from src.model.AritcleModel import ArticleModel
from types import SimpleNamespace

os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"  # route HuggingFace downloads through a mirror
CACHE_DIR = "cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# Local path of the sentence-embedding model loaded by init_llm_model().
model_path = "models/text2vec"

# Module-level singletons: lazily-initialized Bi-Encoder model and compute device.
bi_encoder = None
_device = None


def get_device():
    """Lazily resolve and cache the compute device ("cuda" or "cpu")."""
    global _device
    if _device is not None:
        return _device
    _device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"device: {_device}")
    return _device


def init_llm_model():
    """Load the Bi-Encoder model into the module-level singleton.

    Idempotent: does nothing when the model is already loaded.
    """
    global bi_encoder
    if bi_encoder is not None:
        return
    print("正在加载Bi-Encoder模型...")
    bi_encoder = SentenceTransformer(model_path, device=get_device())
    print("模型加载完毕。")


def get_bi_encoder():
    """Return the Bi-Encoder singleton; raise if it was never initialized."""
    if bi_encoder is not None:
        return bi_encoder
    raise RuntimeError(
        "LLM model has not been initialized. Please call init_llm_model() first."
    )


def clear_gpu_memory():
    """Free as much accelerator memory as possible.

    Runs the Python garbage collector first so that unreferenced tensors are
    actually destroyed, then asks PyTorch to release its cached CUDA blocks
    back to the driver. The original order (empty_cache before collect, both
    gated on CUDA) could not release blocks still held by uncollected tensors
    and skipped collection entirely on CPU-only hosts.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


def values_to_sentence(json_data):
    """Flatten a dict's values into one Chinese sentence.

    Booleans render as "是"/"否", numbers via str(), lists as "[a, b]",
    everything else via str(); pieces are comma-joined and the sentence ends
    with "。". The bool branch must precede the numeric one because bool is a
    subclass of int.
    """
    def _render(value):
        if isinstance(value, bool):
            return "是" if value else "否"
        if isinstance(value, (int, float)):
            return str(value)
        if isinstance(value, list):
            return f"[{', '.join(map(str, value))}]"
        return str(value)

    return ", ".join(_render(v) for v in json_data.values()) + "。"

def sort_articles_by_interest_new(
    articles: list,
    user_profile: dict,
    version: str = "professional",
    top_k: int = 5,
    similarity_threshold: float = 0.2,
    deduplicate: bool = True,
):
    """Rank and group audited articles for one user (V3: Faiss vector recall).

    Pipeline:
      0. Embed all audited article titles and build a Faiss inner-product
         index (embeddings are L2-normalized, so scores are cosine similarity).
      1. Build an enhanced textual user profile from the base summary plus
         viewing/liking history summaries.
      2. Ask an LLM for a recall plan (process -> keyword list), recall up to
         3 articles per keyword whose score >= similarity_threshold, then
         re-rank each keyword's candidates by title similarity to the user's
         recently liked (weight 2.0) and viewed (weight 1.0) articles.
      3. Batch-generate a personalized summary for every distinct candidate.
      4. Assemble an ordered process -> keyword -> articles structure.

    Args:
        articles: article objects; this function reads .audit, .aid, .title
            and .profile (a dict) — presumably ArticleModel instances; verify.
        user_profile: dict; reads "name", "profile_summary",
            "viewed_articles_summary", "liked_articles_summary",
            "recent_viewed_aids" and "recent_liked_aids".
        version: "professional" picks the professional LLM recall prompt,
            any other value the caring one.
        top_k: number of Faiss neighbours fetched per keyword (before the
            3-per-keyword cap).
        similarity_threshold: minimum similarity score for a recall hit.
        deduplicate: when True, each article may be recalled by at most one
            keyword across the whole plan.

    Returns:
        (total_process, final_recommendations, article_distances) where
        final_recommendations is a list of {process: OrderedDict(keyword ->
        [{"article": ..., "summary": ...}])} and article_distances maps
        aid -> recall score. Returns (None, {}, {}) when no audited article
        exists.

    Raises:
        RuntimeError: if init_llm_model() was never called.
        Exception: any pipeline error is printed and re-raised.
    """
    start_time = time.time()
    last_time = start_time
    print("=" * 20, flush=True)
    print(
        f"开始为用户 {user_profile.get('name')} 生成新版推荐 (Faiss向量召回)...",
        flush=True,
    )

    try:
        # --- Step 0: initialization and preprocessing ---
        print("[0/4] 初始化模型和文章数据...", flush=True)
        get_bi_encoder()  # fail fast if the encoder singleton is not loaded
        valid_articles = [article for article in articles if article.audit]
        if not valid_articles:
            # NOTE(review): this empty case returns {} for recommendations
            # while the success path returns a list — callers must cope with both.
            return None, {}, {}

        article_titles = [article.title for article in valid_articles]
        title_embeddings = get_bi_encoder().encode(
            article_titles, convert_to_tensor=True, normalize_embeddings=True
        )

        # Normalized vectors + IndexFlatIP => inner product == cosine similarity.
        dimension = get_bi_encoder().get_sentence_embedding_dimension()
        index = faiss.IndexFlatIP(dimension)
        index.add(title_embeddings.cpu().numpy())
        print(
            f"  - 文章标题Faiss索引构建完毕，共 {len(valid_articles)} 篇文章。",
            flush=True,
        )
        current_time = time.time()
        print(f"  - 步骤 0 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 1: build the user profile ---
        print("[1/4] 正在构建用户画像...", flush=True)
        # Enhance the base profile with viewing and liking history summaries.
        base_summary = user_profile.get(
            "profile_summary") or values_to_sentence(user_profile)
        view_summary = user_profile.get("viewed_articles_summary", "")
        like_summary = user_profile.get("liked_articles_summary", "")

        user_info_enhanced = f"{base_summary} {view_summary} {like_summary}"

        print(f"  - 增强的用户画像: {user_info_enhanced[:150]}...", flush=True)
        current_time = time.time()
        print(f"  - 步骤 1 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 2: dynamic recall-plan generation & Faiss vector recall ---
        print("[2/4] 正在执行动态向量召回...", flush=True)

        if version == "professional":
            total_process, recall_pipelines = llm_generate_recall_pipelines_professional(user_info_enhanced)
        else:
            total_process, recall_pipelines = llm_generate_recall_pipelines_caring(user_info_enhanced)
        if not recall_pipelines or not total_process:
            # Fallback: hard-coded agricultural plan when the LLM yields nothing usable.
            print("[警告] 未能生成动态召回流程，使用默认流程。", flush=True)
            total_process = "默认推荐流程"
            recall_pipelines = {
                "农业生产": ["播种", "施肥", "灌溉"],
                "病虫害防治": ["病害", "虫害", "除草"],
            }
        print(f"  - 总流程: {total_process}", flush=True)
        print(f"  - 分流程->关键词 流程已生成: {recall_pipelines}", flush=True)

        candidate_articles = {}   # process -> keyword -> recalled article list
        recalled_aids = set()     # cross-keyword de-dup set (when deduplicate=True)
        article_distances = {}    # aid -> recall similarity score

        for process, keywords in recall_pipelines.items():
            candidate_articles[process] = {}
            for kw in keywords:
                kw_embedding = get_bi_encoder().encode(
                    [kw], convert_to_tensor=True, normalize_embeddings=True
                )
                distances, indices = index.search(kw_embedding.cpu().numpy(), top_k)

                kw_recalled = []
                for i in range(len(indices[0])):
                    # Stop once 3 articles are found so only the most relevant are kept.
                    if len(kw_recalled) >= 3:
                        break

                    score = distances[0][i]
                    article_index = indices[0][i]

                    if score >= similarity_threshold:
                        article = valid_articles[article_index]
                        if deduplicate and article.aid in recalled_aids:
                            continue

                        article_distances[article.aid] = float(score)
                        kw_recalled.append(article)
                        if deduplicate:
                            recalled_aids.add(article.aid)

                if kw_recalled:
                    candidate_articles[process][kw] = kw_recalled

        # --- Re-ranking pass (coarse, flag-based) ---
        # NOTE(review): this ordering is immediately overwritten by the
        # similarity-based re-ranking below, so this pass is dead work.
        recent_viewed_aids = user_profile.get("recent_viewed_aids", [])
        recent_liked_aids = user_profile.get("recent_liked_aids", [])

        for process in candidate_articles:
            for kw in candidate_articles[process]:
                candidate_articles[process][kw].sort(
                    key=lambda article: (
                        2 if article.aid in recent_liked_aids else
                        1 if article.aid in recent_viewed_aids else
                        0
                    ),
                    reverse=True
                )

        # --- Improved re-ranking: title similarity against recent history ---
        aid_to_index = {article.aid: i for i,
                        article in enumerate(valid_articles)}
        recent_viewed_aids = set(user_profile.get("recent_viewed_aids", []))
        recent_liked_aids = set(user_profile.get("recent_liked_aids", []))

        def get_embeddings_by_aids(aids):
            # Map aids to title-embedding rows; unknown aids are silently skipped.
            indices = [aid_to_index.get(aid)
                       for aid in aids if aid in aid_to_index]
            if not indices:
                return torch.tensor([])
            return title_embeddings[indices]

        recent_viewed_embeddings = get_embeddings_by_aids(recent_viewed_aids)
        recent_liked_embeddings = get_embeddings_by_aids(recent_liked_aids)

        for process in candidate_articles:
            for kw in candidate_articles[process]:
                scored_articles = []
                recalled_list = candidate_articles[process][kw]

                for article in recalled_list:
                    article_embedding = title_embeddings[aid_to_index[article.aid]].unsqueeze(
                        0)
                    score = 0.0

                    # Similarity against liked history (weight 2.0).
                    if recent_liked_embeddings.numel() > 0:
                        liked_sims = torch.mm(
                            article_embedding, recent_liked_embeddings.T)
                        score += 2.0 * torch.max(liked_sims).item()

                    # Similarity against viewed history (weight 1.0).
                    if recent_viewed_embeddings.numel() > 0:
                        viewed_sims = torch.mm(
                            article_embedding, recent_viewed_embeddings.T)
                        score += 1.0 * torch.max(viewed_sims).item()

                    scored_articles.append((article, score))

                # Sort by score, highest first.
                scored_articles.sort(key=lambda x: x[1], reverse=True)
                # Replace the candidate list with the score-sorted one.
                # NOTE(review): already-viewed articles are NOT filtered out here,
                # despite the original comment claiming otherwise.
                candidate_articles[process][kw] = [
                    item[0] for item in scored_articles]

        total_recalled = sum(
            len(v) for p in candidate_articles.values() for v in p.values()
        )
        print(f"  - 体系化向量召回并过滤后，共 {total_recalled} 篇文章。", flush=True)
        current_time = time.time()
        print(f"  - 步骤 2 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 3: batch-generate personalized, context-aware summaries ---
        print("[3/4] 正在批量生成带上下文的个性化文章摘要...", flush=True)

        # Collect every candidate once, keeping its first process/keyword context.
        articles_with_context = {}
        for process, keywords_data in candidate_articles.items():
            for keyword, recalled_articles_list in keywords_data.items():
                for article in recalled_articles_list:
                    if article.aid not in articles_with_context:
                        articles_with_context[article.aid] = {
                            "article": article,
                            "process": process,
                            "keyword": keyword,
                        }

        articles_to_summarize_data = [
            {
                "id": str(data["article"].aid),
                "content": data["article"].profile.get("summary")
                or values_to_sentence(data["article"].profile),
                "process": data["process"],
                "keyword": data["keyword"],
            }
            for data in articles_with_context.values()
        ]

        # One API call for all summaries; failures fall back to a stock phrase below.
        summaries_map = {}
        if articles_to_summarize_data:
            try:
                summaries_map = llm_generate_batch_summaries(
                    user_info_enhanced, articles_to_summarize_data
                )
            except Exception as e:
                print(f"调用批量摘要API时出错: {e}", flush=True)

        current_time = time.time()
        print(f"  - 步骤 3 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 4: assemble the three-level nested structure ---
        print("[4/4] 正在构建三层推荐结构...", flush=True)
        final_recommendations = []
        # Iterate recall_pipelines (not candidate_articles) to keep the LLM's process order.
        for process, keywords_in_pipeline in recall_pipelines.items():
            if process in candidate_articles:
                process_data = candidate_articles[process]

                keyword_dict_for_process = OrderedDict()

                # Iterate the pipeline's keyword list to keep keyword order too.
                for keyword in keywords_in_pipeline:
                    if keyword in process_data:
                        recalled_articles_list = process_data[keyword]
                        articles_for_keyword = []
                        for article in recalled_articles_list:
                            summary = summaries_map.get(
                                str(article.aid), "根据您的兴趣，为您精心挑选。"
                            )
                            articles_for_keyword.append(
                                {"article": article, "summary": summary}
                            )

                        if articles_for_keyword:
                            keyword_dict_for_process[keyword] = articles_for_keyword

                if keyword_dict_for_process:
                    final_recommendations.append({process: keyword_dict_for_process})

        current_time = time.time()
        print(f"  - 步骤 4 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Done ---
        end_time = time.time()
        print(
            f"[完成] 新版推荐生成完毕，总用时: {end_time - start_time:.2f} 秒",
            flush=True,
        )
        print("=" * 20, flush=True)

        return total_process, final_recommendations, article_distances

    except Exception as e:
        print(f"处理过程中发生错误: {e}", flush=True)
        raise


def sort_articles_by_keyword_interest_new(
    articles: list,
    user_profile: dict,
    keywords: List[str],
    top_k: int = 5,
    similarity_threshold: float = 0.7,
    deduplicate: bool = False,
):
    """Recommend articles matching user-supplied keywords (V3.2).

    Pipeline: embed all audited article titles into a Faiss inner-product
    index, embed the space-joined, lower-cased keyword string as a single
    query, recall the top_k titles scoring >= similarity_threshold,
    batch-generate personalized summaries via the LLM, and return
    lightweight article-like objects whose profile carries the new summary.

    Args:
        articles: article objects; reads .audit, .aid, .title, .view,
            .profile (a dict) and .createtime — presumably ArticleModel; verify.
        user_profile: dict; reads "name" and "profile_summary".
        keywords: the user's keyword list; merged into one query string.
        top_k: number of Faiss neighbours to fetch.
        similarity_threshold: minimum similarity score for a recall hit.
        deduplicate: when True, skip repeated aids among the hits.

    Returns:
        ({"关键词匹配推荐": [SimpleNamespace(...)]}, {aid: score}), or
        ({}, {}) when there is no audited article, no keyword, or no hit.

    Raises:
        RuntimeError: if init_llm_model() was never called.
        Exception: any pipeline error is printed (with traceback) and re-raised.
    """
    start_time = time.time()
    last_time = start_time
    print("=" * 20, flush=True)
    print(
        f"开始为用户 {user_profile.get('name')} 基于关键词 '{keywords}' 生成推荐...",
        flush=True,
    )

    try:
        # --- Step 0: initialization and preprocessing ---
        print("[0/4] 初始化模型和文章数据...", flush=True)
        get_bi_encoder()  # fail fast if the encoder singleton is not loaded
        valid_articles = [article for article in articles if article.audit]
        if not valid_articles:
            return {}, {}

        article_titles = [article.title for article in valid_articles]

        title_embeddings = get_bi_encoder().encode(
            article_titles, convert_to_tensor=True, normalize_embeddings=True
        )

        # Normalized vectors + IndexFlatIP => inner product == cosine similarity.
        dimension = get_bi_encoder().get_sentence_embedding_dimension()
        index = faiss.IndexFlatIP(dimension)
        index.add(title_embeddings.cpu().numpy())
        print(
            f"  - 文章标题Faiss索引构建完毕，共 {len(valid_articles)} 篇文章。",
            flush=True,
        )
        current_time = time.time()
        print(f"  - 步骤 0 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 1: build the keyword query vector ---
        print("[1/4] 正在构建关键词查询...", flush=True)
        if not keywords:
            print("[警告] 关键词列表为空，无法进行推荐。", flush=True)
            return {}, {}

        # All keywords are merged into one string => a single vector search.
        keyword_str = " ".join(keywords).lower()
        print(f"  - 合并后的关键词查询: '{keyword_str}'", flush=True)
        kw_embedding = get_bi_encoder().encode(
            [keyword_str], convert_to_tensor=True, normalize_embeddings=True
        )
        current_time = time.time()
        print(f"  - 步骤 1 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 2: Faiss vector recall ---
        print("[2/4] 正在执行向量召回...", flush=True)
        distances, indices = index.search(kw_embedding.cpu().numpy(), top_k)

        recalled_articles = []
        article_distances = {}  # aid -> recall similarity score
        recalled_aids = set()

        for i in range(len(indices[0])):
            score = distances[0][i]
            article_index = indices[0][i]

            if score >= similarity_threshold:
                article = valid_articles[article_index]
                if deduplicate and article.aid in recalled_aids:
                    continue

                article_distances[article.aid] = float(score)
                recalled_articles.append(article)
                if deduplicate:
                    recalled_aids.add(article.aid)

        if not recalled_articles:
            print("  - 未召回任何符合条件的文章。", flush=True)
            return {}, {}

        print(f"  - 向量召回完成，共召回 {len(recalled_articles)} 篇文章。", flush=True)
        current_time = time.time()
        print(f"  - 步骤 2 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 3: batch-generate personalized summaries ---
        print("[3/4] 正在批量生成个性化文章摘要...", flush=True)
        user_info_enhanced = user_profile.get("profile_summary") or values_to_sentence(
            user_profile
        )

        articles_to_summarize_data = [
            {
                "id": str(article.aid),
                "content": article.profile.get("summary")
                or values_to_sentence(article.profile),
            }
            for article in recalled_articles
        ]

        # One API call for all summaries; on failure the original summaries are kept.
        summaries_map = {}
        if articles_to_summarize_data:
            try:
                summaries_map = llm_generate_batch_summaries_keyword(
                    user_info_enhanced, articles_to_summarize_data, keywords
                )
            except Exception as e:
                print(f"调用批量摘要API时出错: {e}", flush=True)

        current_time = time.time()
        print(f"  - 步骤 3 耗时: {current_time - last_time:.2f} 秒", flush=True)
        last_time = current_time

        # --- Step 4: build the final result ---
        print("[4/4] 正在构建最终推荐列表...", flush=True)

        final_article_list = []
        for article in recalled_articles:
            new_summary = summaries_map.get(str(article.aid))

            # Shallow-copy the profile so the source article is not mutated.
            new_profile = article.profile.copy()
            if new_summary:
                new_profile["summary"] = new_summary

            article_like_obj = SimpleNamespace(
                aid=article.aid,
                title=article.title,
                view=article.view,
                profile=new_profile,
                createtime=article.createtime,
            )
            final_article_list.append(article_like_obj)

        final_recommendations = {"关键词匹配推荐": final_article_list}

        end_time = time.time()
        print(
            f"[完成] 关键词推荐生成完毕，总用时: {end_time - start_time:.2f} 秒",
            flush=True,
        )
        print("=" * 20, flush=True)

        return final_recommendations, article_distances

    except Exception as e:
        print(f"处理过程中发生错误: {e}", flush=True)
        import traceback

        traceback.print_exc()
        raise
