import hashlib
import logging
import threading
import time
from typing import Any, Dict, Optional

from django.conf import settings
from django.core.cache import cache

from rag_system.enhanced_rag import EnhancedRAGSystem

from .llm_client import LLMClient
from .models import APIKey, ConversationSession, RateLimit, User

# Legacy module-level configuration; these values now come from Django settings
# (settings.TOKEN_EXPIRY_SECONDS, settings.RATE_LIMIT_MAX, settings.RATE_LIMIT_INTERVAL).
# API_KEY_LENGTH = 32
# TOKEN_EXPIRY_SECONDS = 3600
# RATE_LIMIT_MAX = 5  # max requests per minute
# RATE_LIMIT_INTERVAL = 60

# Global vector-index / enhanced-RAG singletons, populated exactly once by
# initialize_vector_system(); the lock guards their first initialization.
_vector_system = None
_enhanced_rag_system = None
_vector_system_lock = threading.Lock()


def initialize_vector_system():
    """Initialize the global vector-index system at application startup (runs once).

    Builds the TopKLogSystem over the local log directory and the
    EnhancedRAGSystem on top of it, storing both in the module-level
    singletons ``_vector_system`` and ``_enhanced_rag_system``. Safe to call
    from multiple threads; after the first successful run it is a no-op.
    """
    global _vector_system, _enhanced_rag_system

    # Double-checked locking: cheap unlocked check first, then re-check under
    # the lock so only one thread performs the expensive initialization.
    if _vector_system is None:
        with _vector_system_lock:
            if _vector_system is None:
                from topklogsystem import TopKLogSystem

                print("🔄 [应用启动] 初始化向量索引系统...")
                _vector_system = TopKLogSystem(
                    log_path="./data/log",
                    llm="",  # no local LLM
                    embedding_model="./data/models/all-MiniLM-L6-v2",  # local embedding model path used to build the RAG index
                )

                # Build the enhanced RAG system on top of the vector index.
                print("🔄 [应用启动] 初始化增强RAG系统...")
                # Use centralized LLM client that prefers remote API.
                # Don't pass a local LLM; rely exclusively on remote API when configured.
                llm_client = LLMClient(local_llm=None)
                _enhanced_rag_system = EnhancedRAGSystem(
                    vector_system=_vector_system, llm_client=llm_client
                )
                print("✅ [应用启动] 向量索引系统和增强RAG系统初始化完成")


def update_vector_database_with_file(file_path: str, original_filename: str):
    """Ingest an uploaded log file into the global vector database.

    Reads the file at *file_path*, splits it into llama-index Documents
    (one per row for CSV files, one per whole file otherwise) and inserts
    them into the global vector index.

    Args:
        file_path: Path of the uploaded file on disk.
        original_filename: Name the user uploaded the file as; stored in
            each document's metadata and used to detect the file type.

    Returns:
        dict: on success ``{"success": True, "added_documents", "total_documents",
        "filename"}``; on failure ``{"success": False, "error": <message>}``.
    """
    try:
        global _vector_system

        if _vector_system is None:
            return {"success": False, "error": "向量系统未初始化"}

        if _vector_system.log_index is None:
            return {"success": False, "error": "向量索引未初始化"}

        # Import once up front instead of inside the per-row loops.
        from llama_index.core import Document

        # Read the full file content, tolerating bad bytes in raw log files.
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            file_content = f.read()

        if not file_content.strip():
            return {"success": False, "error": "文件内容为空"}

        # Determine the file type from the uploaded name's extension.
        file_extension = (
            original_filename.split(".")[-1].lower()
            if "." in original_filename
            else "txt"
        )

        # Shared metadata template; copied per document so entries stay independent.
        metadata = {"source": original_filename, "file_path": file_path}
        documents = []

        if file_extension == "csv":
            # CSV file: one document per row, streamed in chunks to bound memory.
            import pandas as pd

            try:
                chunk_size = 1000
                for chunk in pd.read_csv(file_path, chunksize=chunk_size):
                    for row in chunk.itertuples(index=False):
                        # str(row) renders as "Pandas(col=..., ...)"; strip the
                        # namedtuple class name from the text.
                        content = str(row).replace("Pandas", " ")
                        documents.append(
                            Document(text=content, metadata=dict(metadata))
                        )
            except Exception as e:
                print(f"⚠️ CSV解析失败，使用文本方式: {e}")
                # Fallback: treat the CSV as plain text, one document per
                # non-blank line.
                lines = file_content.split("\n")
                for i, line in enumerate(lines):
                    if line.strip():
                        documents.append(
                            Document(
                                text=f"行{i+1}: {line.strip()}",
                                metadata=dict(metadata),
                            )
                        )
        else:
            # Plain text file: ingest as a single document.
            documents.append(Document(text=file_content, metadata=dict(metadata)))

        # Insert documents into the vector index, skipping individual failures.
        added_count = 0
        for doc in documents:
            if doc.text.strip():
                try:
                    _vector_system.log_index.insert(doc)
                    added_count += 1
                except Exception as e:
                    print(f"⚠️ 添加文档失败: {e}")
                    continue

        return {
            "success": True,
            "added_documents": added_count,
            "total_documents": len(documents),
            "filename": original_filename,
        }

    except Exception as e:
        print(f"❌ 更新向量数据库失败: {e}")
        return {"success": False, "error": str(e)}


def get_vector_system():
    """Return the global vector-index system, initializing it lazily if needed."""
    global _vector_system

    if _vector_system is not None:
        return _vector_system

    # Normally initialized at startup; this path is a defensive fallback.
    print("⚠️  向量系统未在启动时初始化，正在延迟初始化...")
    initialize_vector_system()
    return _vector_system


def get_enhanced_rag_system():
    """Return the global enhanced RAG system, initializing it lazily if needed."""
    global _enhanced_rag_system

    if _enhanced_rag_system is not None:
        return _enhanced_rag_system

    # Normally initialized at startup; this path is a defensive fallback.
    print("⚠️  增强RAG系统未在启动时初始化，正在延迟初始化...")
    initialize_vector_system()
    return _enhanced_rag_system


# Lock serializing the rate-limit read/modify/write cycle in check_rate_limit.
rate_lock = threading.Lock()


def deepseek_r1_api_call(
    prompt: str,
    user_input: str = None,
    conversation_context: str = None,
    session_id: str = "default",
    user_id: str = "anonymous",
    temperature: float = 0.7,
) -> Dict[str, Any] | str:
    """RAG-augmented chat-completion call.

    Flow: retrieve relevant context from the local vector database, then call
    the remote chat API with the retrieved context prepended to the prompt.

    Args:
        prompt: Full question text sent to the remote model.
        user_input: Raw user question, used for enhanced RAG retrieval.
        conversation_context: Prior conversation history, used for query rewriting.
        session_id: Conversation session identifier passed to the retriever.
        user_id: Identifier of the requesting user.
        temperature: Sampling temperature for the remote model.

    Returns:
        When an API key is configured: the model's reply (or an error
        description) as ``str``. When no key is configured (local RAG-only
        mode): a ``dict`` with ``response``, ``context`` and
        ``retrieval_stats`` keys.
    """
    import requests

    # Remote API configuration, with DeepSeek defaults.
    api_url = getattr(
        settings, "AI_API_URL", "https://api.deepseek.com/v1/chat/completions"
    )
    api_key = getattr(settings, "AI_API_KEY", "")
    model_name = getattr(settings, "AI_MODEL_NAME", "deepseek-chat")

    # No API key configured: fall back to local enhanced-RAG retrieval only
    # (no response generation is implemented in this mode).
    if not api_key:
        print("⚠️  未配置AI_API_KEY，使用本地增强RAG模式...")
        try:
            enhanced_rag = get_enhanced_rag_system()
            result = enhanced_rag.enhanced_retrieve(
                user_input, conversation_context, session_id, user_id
            )
            return {
                "response": "本地增强RAG模式暂未实现完整响应生成",
                "context": result["context"],
                "retrieval_stats": result["retrieval_stats"],
            }
        except Exception as e:
            return {
                "response": f"本地模型调用失败: {str(e)}",
                "context": "",
                "retrieval_stats": {
                    "total_retrieved": 0,
                    "after_rerank": 0,
                    "after_filter": 0,
                    "queries_used": 0,
                },
            }

    # RAG: retrieve relevant context via the enhanced retriever.
    context = ""
    retrieval_stats = {}
    try:
        print("🔍 [enhanced RAG] 正在执行增强检索...")
        enhanced_rag = get_enhanced_rag_system()

        # Enhanced retrieval: query rewriting + rerank + filter.
        rag_result = enhanced_rag.enhanced_retrieve(
            query=user_input,
            conversation_history=conversation_context,
            session_id=session_id,
            user_id=user_id,
        )

        context = rag_result["context"]
        retrieval_stats = rag_result["retrieval_stats"]
        debug_info = rag_result["debug_info"]

        print(
            f"✅ [Enhanced RAG] 询问重写完成\n"
            f"    用户原始询问: {debug_info['original_query']}\n"
            f"    重写后询问: {debug_info['rewritten_queries']}\n"
            f"    重写方法: {debug_info['query_type']}\n"
            f"✅ [Enhanced RAG] 检索完成\n"
            f"    总检索: {retrieval_stats['total_retrieved']}\n"
            f"    重排后: {retrieval_stats['after_rerank']}, 过滤后: {retrieval_stats['after_filter']}\n"
            f"    使用查询数: {retrieval_stats['queries_used']}\n"
            f"✅ [Enhanced RAG] 最终日志信息: \n"
            f"    {context}"
        )

    except Exception as e:
        print(f"⚠️ [Enhanced RAG] 检索失败, 回退到基础检索模式: {str(e)}")
        # Fall back to basic top-k retrieval against the raw prompt.
        try:
            system = get_vector_system()
            retrieval_results = system.retrieve_logs(prompt, top_k=5)
            if retrieval_results:
                retrieved_docs = [log["content"] for log in retrieval_results]
                context = "\n\n".join(retrieved_docs)
                retrieval_stats = {
                    "total_retrieved": len(retrieval_results),
                    "fallback": True,
                }
        except Exception as fallback_e:
            print(f"⚠️ [Enhanced RAG] 基础检索失败, 询问将不使用RAG: {str(fallback_e)}")

    # Call the remote API with the retrieved context (if any) prepended.
    try:
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

        # Build the augmented prompt containing the retrieved context.
        if context:
            enhanced_prompt = f"""对于用户询问，一些检索到的可能相关的日志及解决方法如下，请参考它们作为额外的知识补充和你自己的知识回答用户问题：

【相关日志】
{context}

【用户问题】
{prompt}

请结合上述日志信息给出专业的回答。如果日志中没有相关信息，请基于你的知识回答。"""
        else:
            enhanced_prompt = prompt

        data = {
            "model": model_name,
            "messages": [{"role": "user", "content": enhanced_prompt}],
            "temperature": temperature,
            "max_tokens": 4000,
        }

        print(f"🚀 调用DeepSeek API (RAG增强模式)")
        response = requests.post(api_url, headers=headers, json=data, timeout=180)

        if response.status_code == 200:
            result = response.json()

            # Validate the response shape before indexing into it.
            if "choices" not in result or not result["choices"]:
                error_msg = "API响应格式错误：缺少choices字段"
                print(f"❌ {error_msg}")
                return error_msg

            choice = result["choices"][0]
            if "message" not in choice or "content" not in choice["message"]:
                error_msg = "API响应格式错误：缺少message.content字段"
                print(f"❌ {error_msg}")
                return error_msg

            reply = choice["message"]["content"]
            if not reply or not reply.strip():
                error_msg = "API返回空内容"
                print(f"❌ {error_msg}")
                return error_msg

            # Flag replies that were cut off by the max_tokens limit.
            if choice.get("finish_reason") == "length":
                print("⚠️  API响应因达到max_tokens限制被截断")
                reply += "\n\n[注意：回答因长度限制被截断，如需完整回答请重新提问]"

            print(
                f"✅ API响应成功 (使用了{len(context)}字符的本地上下文，回复长度: {len(reply)}字符)"
            )
            return reply
        else:
            error_msg = f"API调用失败: {response.status_code} - {response.text}"
            print(f"❌ {error_msg}")
            return error_msg

    except requests.exceptions.Timeout:
        error_msg = "API调用超时（180秒），请稍后重试"
        print(f"❌ {error_msg}")
        return error_msg
    except requests.exceptions.ConnectionError as e:
        error_msg = f"网络连接失败: {str(e)}"
        print(f"❌ {error_msg}")
        return error_msg
    except Exception as e:
        error_msg = f"API调用异常: {str(e)}"
        print(f"❌ {error_msg}")
        return error_msg


def create_api_key(user: str) -> str:
    """Create and persist a new API key for *user*; return the key string."""
    new_key = APIKey.generate_key()

    record = APIKey.objects.create(
        key=new_key,
        user=user,
        expiry_time=time.time() + settings.TOKEN_EXPIRY_SECONDS,
    )

    # Every key gets its own rate-limit row with a fresh window.
    RateLimit.objects.create(
        api_key=record, reset_time=time.time() + settings.RATE_LIMIT_INTERVAL
    )

    return new_key


def validate_api_key(key_str: str) -> bool:
    """Return True iff *key_str* maps to a stored, non-expired API key.

    Expired keys are deleted as a side effect of the check.
    """
    try:
        record = APIKey.objects.get(key=key_str)
    except APIKey.DoesNotExist:
        return False

    if not record.is_valid():
        record.delete()  # purge the expired key
        return False
    return True


def check_rate_limit(key_str: str) -> bool:
    """Return True if the key may make another request in the current window.

    Implements a fixed window of settings.RATE_LIMIT_INTERVAL seconds with at
    most settings.RATE_LIMIT_MAX requests per window; the module-level
    rate_lock serializes the read-modify-write against concurrent threads.
    """
    with rate_lock:
        try:
            limiter = RateLimit.objects.select_related("api_key").get(
                api_key__key=key_str
            )
        except RateLimit.DoesNotExist:
            # No limiter row yet — create one counting this request, but only
            # for keys that actually exist.
            try:
                owner = APIKey.objects.get(key=key_str)
            except APIKey.DoesNotExist:
                return False
            RateLimit.objects.create(
                api_key=owner,
                count=1,
                reset_time=time.time() + settings.RATE_LIMIT_INTERVAL,
            )
            return True

        now = time.time()
        if now > limiter.reset_time:
            # Window elapsed: start a fresh one with this request counted.
            limiter.count = 1
            limiter.reset_time = now + settings.RATE_LIMIT_INTERVAL
        elif limiter.count < settings.RATE_LIMIT_MAX:
            limiter.count += 1
        else:
            return False
        limiter.save()
        return True


# def get_or_create_session(session_id: str, user: APIKey) -> ConversationSession:
# """获取或创建会话，关联当前用户（通过API Key）"""
# session, created = ConversationSession.objects.get_or_create(
# session_id=session_id,
# user=user,  # 绑定用户
# defaults={'context': ''}
# )
# return session


def get_or_create_session(session_id: str, user: User) -> ConversationSession:
    """Fetch or create the given user's conversation session.

    - If (user, session_id) already exists, the old session (with its history)
      is loaded.
    - Otherwise a new session with empty context is created.

    Matching on both user and session_id prevents cross-user session collisions.

    Args:
        session_id: Client-supplied session identifier.
        user: Owner of the session.

    Returns:
        The existing or newly created ConversationSession.
    """
    session, created = ConversationSession.objects.get_or_create(
        session_id=session_id,
        user=user,
        defaults={"context": ""},
    )
    # Debug log: created=True means a brand-new session. Lazy %-args avoid
    # formatting when INFO is disabled; logger is resolved at module import
    # cost only (logging imported at file top), not rebuilt per call.
    logging.getLogger(__name__).info(
        "会话 %s（用户：%s）%s",
        session_id,
        user.username,
        "创建新会话" if created else "加载旧会话",
    )
    return session


def get_cached_reply(prompt: str, session_id: str, user: User) -> str | None:
    """Return the cached reply for (user, session, prompt), or None on a miss.

    Keys are derived with generate_cache_key (SHA-256) instead of the builtin
    hash(): string hashes are randomized per process (PYTHONHASHSEED), so
    hash()-based keys never match across workers or restarts.
    """
    cache_key = generate_cache_key(f"reply:{user.username}:{session_id}:{prompt}")
    return cache.get(cache_key)


def set_cached_reply(
    prompt: str, reply: str, session_id: str, user: User, timeout=3600
):
    """Cache *reply* for (user, session, prompt) for *timeout* seconds.

    Key derivation must mirror get_cached_reply: generate_cache_key (SHA-256)
    rather than the process-randomized builtin hash(), so entries are
    retrievable from any worker process.
    """
    cache_key = generate_cache_key(f"reply:{user.username}:{session_id}:{prompt}")
    cache.set(cache_key, reply, timeout)


def generate_cache_key(original_key: str) -> str:
    """Derive a safe cache key from *original_key*.

    Hashing with SHA-256 yields a fixed-length key (64 hex characters) that
    contains only characters acceptable to any cache backend.
    """
    return hashlib.sha256(original_key.encode("utf-8")).hexdigest()


def generate_session_name(user_input: str) -> str:
    """Generate a concise session title from the user's first question via the LLM.

    Falls back to a truncated copy of the question when the LLM call fails or
    returns an unusable (empty / overly long) name.

    Args:
        user_input: The user's first question in the session.

    Returns:
        A short session name string.
    """

    def _truncated(text: str) -> str:
        # Shared fallback: first 20 characters of the question, with ellipsis.
        return text[:20] + ("..." if len(text) > 20 else "")

    try:
        # Use the centralized LLM client to generate the name.
        llm_client = LLMClient(local_llm=None)

        prompt = f"""请为以下用户问题生成一个简洁的会话名称（10-20个字以内）。
        
用户问题：{user_input}

要求：
1. 直接提取问题的核心主题
2. 使用简洁的中文表述
3. 不要包含"关于"、"如何"等冗余词汇
4. 只返回会话名称，不要有其他内容

会话名称："""

        session_name = llm_client.invoke(prompt, temperature=0.3, max_tokens=50)

        # Clean the returned name (strip surrounding quotes / whitespace).
        session_name = session_name.strip().strip('"').strip("'").strip()

        # Unusable result (empty or too long) falls back to truncation.
        if not session_name or len(session_name) > 50:
            session_name = _truncated(user_input)

        return session_name
    except Exception as e:
        print(f"❌ 生成会话名称失败: {e}")
        # Degraded path: same truncation fallback.
        return _truncated(user_input)
