import os
import uuid
import logging
import datetime
from ..presets import *
from ..db_models.db_knowledge_file import KnowledgeFile
from django.db import transaction
from typing import List
import asyncio
from asgiref.sync import sync_to_async
import time

from .db.knowledge_file import *
from .db.knowledge import *
from ..utils import *
from .knowledge_stores import (
    parse_file as PARSE_FILE,
    add_db_store as ADD_DB_STORE,
    parse_file_stream as PARSE_FILE_STREAM,
)
from .knowledge_redis import (
    delete_by_prefix as DELETE_BY_PREFIX,
    search_text as REDIS_SEARCH_TEXT,
)
from django.http import FileResponse
from django.utils.encoding import escape_uri_path
from .db.knowledge_file import FileStatus
from .config import get_config as GET_CONFIG


# Shared application logger; the "chat_app" name ties into the project's logging config.
logger = logging.getLogger("chat_app")


def upload_file(user_name, knowledge_id, file, file_config: dict):
    """Upload a file into a knowledge base and index it.

    @param user_name: owner user name
    @param knowledge_id: knowledge base id
    @param file: uploaded file object (exposes ``name``, ``size``, ``chunks()``)
    @param file_config: parsing configuration for the file
    """
    title, file_type = os.path.splitext(file.name)
    # Random on-disk name avoids collisions between uploads; the original
    # title is kept in the DB record. (uuid4().hex == str(uuid4()) minus dashes)
    file_id = f"{uuid.uuid4().hex}{file_type}"
    file_size_mb = "{:.2f}".format(file.size / 1024 / 1024)
    # Persist the file and register it with UPLOAD status
    file_path = os.path.join(KNOWLEDGE_UPLOAD_FILE_DIR, user_name, file_id)
    save_data = save_upload_file(
        user_name,
        knowledge_id,
        file,
        file_type,
        file_path,
        file_id,
        title,
        file_size_mb,
    )
    record_id = save_data["id"]
    index_name = save_data["index_name"]

    # Run the async streaming parse on a dedicated event loop.
    # try/finally fixes a leak: previously the loop was never closed when
    # parse_file_stream raised.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(
            parse_file_stream(
                record_id, user_name, file_path, file_type, file_config, title, index_name
            )
        )
    finally:
        loop.close()


@transaction.atomic
def save_upload_file(
    user_name, knowledge_id, file, file_type, file_path, file_id, title, file_size_mb
) -> dict:
    """Persist an uploaded file to disk and record it in the database.

    @param user_name: owner user name
    @param knowledge_id: knowledge base id
    @param file: uploaded file object
    @param file_type: file extension (including the dot)
    @param file_path: absolute path the file is written to
    @param file_id: randomized on-disk file name
    @param title: original file name without extension
    @param file_size_mb: formatted size in MB
    @return: dict with the new record ``id`` and the knowledge ``index_name``
    @raise AgentException: when saving fails (the on-disk file is cleaned up)
    """
    try:
        # Resolve the vector index of the target knowledge base.
        index_name = search_knowledge_id(knowledge_id)["index_name"]
        logger.debug(f"{user_name}上传文件中....")

        # Write the upload to disk, then register it with UPLOAD status.
        save_file(file, file_path)
        record_id = save_knowledge_file(
            user_name,
            knowledge_id,
            file_id,
            title,
            file_type,
            file_path,
            file_size_mb,
            None,  # document_ids: not known until parsing finishes
            0,  # document_count: filled in after parsing
            index_name,
            status=FileStatus.UPLOAD.value,
        )
        logger.debug(f"{user_name}上传文件成功....")
        return {"id": record_id, "index_name": index_name}
    except Exception as e:
        logger.error(f"上传文件【{title}】失败: {e}")
        # Best-effort cleanup of the partially-saved file.
        if os.path.exists(file_path):
            os.remove(file_path)
        raise AgentException("INTERNAL_ERROR", f"上传文件【{title}】失败")


def parse_file(id, user_name, file_path, file_type, file_config, title):
    """Parse an uploaded document into chunks and advance its status.

    @param id: file record primary key
    @param user_name: owner user name (log messages only)
    @param file_path: path of the file on disk
    @param file_type: file extension (including the dot)
    @param file_config: parsing configuration
    @param title: original file title (used in error messages)
    @return: list of parsed document chunks
    @raise AgentException: marks the record PARSE_FAIL and re-raises on failure
    """
    try:
        update_file_status(id, status_enum=FileStatus.PARSE)
        logger.debug(f"{user_name}解析文件中....")
        chunks = PARSE_FILE(id, file_path, file_type, file_config, title)
        logger.debug(f"{user_name}解析文件成功")

        # Record the chunk count and move the file into the VECTOR stage.
        update_file_docment_count(id, len(chunks), status_enum=FileStatus.VECTOR)
        logger.debug(f"{user_name}文件向量中....")
        return chunks
    except Exception as e:
        logger.error(f"解析文件【{title}】失败: {e}")
        update_file_docment_count(id, 0, status_enum=FileStatus.PARSE_FAIL)
        raise AgentException("INTERNAL_ERROR", f"解析文件【{title}】失败")


def store_file(id, user_name, index_name, doc_data, title):
    """Store parsed document vectors and mark the file record COMPLETE.

    @param id: file record primary key
    @param user_name: owner user name (log messages only)
    @param index_name: vector index the documents are written into
    @param doc_data: parsed document chunks to vectorize
    @param title: original file title (used in error messages)
    @raise AgentException: on failure, after marking the record VECTOR_FAIL
    """
    try:
        # Persist the vector data
        ADD_DB_STORE(id, index_name, doc_data)
        update_file_index_ids(id, None, status_enum=FileStatus.COMPLETE)
        logger.debug(f"{user_name}文件向量完成....")
        logger.info(f"{user_name}上传文件成功")
    except Exception as e:
        logger.error(f"向量化文件【{title}】失败: {e}")
        # Record the failed stage before surfacing the error to the caller.
        update_file_index_ids(id, None, status_enum=FileStatus.VECTOR_FAIL)
        raise AgentException("INTERNAL_ERROR", f"向量化文件【{title}】失败")


def store_file_stram(id, user_name, index_name, doc_data: List[Document], title):
    """Vectorize one batch of parsed documents and log throughput.

    NOTE(review): the name keeps its existing typo ("stram") because it is
    referenced as a public name elsewhere in this module.

    @param id: file record primary key
    @param user_name: owner user name (unused here beyond the signature)
    @param index_name: vector index to write into
    @param doc_data: batch of parsed document chunks
    @param title: original file title (used in error messages)
    @raise AgentException: marks the record VECTOR_FAIL and re-raises on failure
    """
    try:
        started = time.time()
        ADD_DB_STORE(id, index_name, doc_data)
        # Log how fast this batch went through (documents per minute).
        minutes = (time.time() - started) / 60
        count = len(doc_data)
        speed = count / max(1, minutes)  # floor the divisor to avoid /0
        logger.info(
            f"已处理 {count} 个文档片段 | "
            f"耗时: {minutes:.1f} 分钟 | "
            f"速度: {speed:.1f} 文档/分钟"
        )
    except Exception as e:
        logger.error(f"向量化文件【{title}】失败: {e}")
        update_file_index_ids(id, None, status_enum=FileStatus.VECTOR_FAIL)
        raise AgentException("INTERNAL_ERROR", f"向量化文件【{title}】失败")


# Async wrappers around the synchronous ORM helpers so parse_file_stream can
# await them without blocking the event loop (asgiref runs them in a thread).
async_update_file_status = sync_to_async(update_file_status)
async_update_file_docment_count = sync_to_async(update_file_docment_count)
async_store_file = sync_to_async(store_file_stram)


async def parse_file_stream(
    id, user_name, file_path, file_type, file_config, title, index_name, batch_size=10
):
    """Parse a document as a stream and vectorize it in batches.

    @param id: file record primary key
    @param user_name: owner user name (log messages only)
    @param file_path: path of the uploaded file on disk
    @param file_type: file extension (including the dot)
    @param file_config: parsing configuration
    @param title: original file title (used in logs/errors)
    @param index_name: vector index to write into
    @param batch_size: number of chunks vectorized per batch (generalized
                       from the previously hard-coded 10; default unchanged)
    @raise AgentException: marks the record VECTOR_FAIL and re-raises on failure
    """
    # Initialize before the try block so the except handler can always
    # reference them, even if an early statement raises.
    processed_count = 0
    start_time = time.time()
    try:
        batch = []

        await async_update_file_status(id, status_enum=FileStatus.VECTOR)
        logger.debug(f"{user_name}文件向量中....")

        # Stream processing: vectorize chunks as they are produced instead of
        # materializing the whole document first.
        async for processed_doc in PARSE_FILE_STREAM(
            id, file_path, file_type, file_config, title
        ):
            batch.append(processed_doc)
            processed_count += 1
            if len(batch) >= batch_size:
                await async_store_file(id, user_name, index_name, batch, title)
                batch = []  # start a fresh batch

        # Flush the final partial batch.
        if batch:
            await async_store_file(id, user_name, index_name, batch, title)

        total_min = (time.time() - start_time) / 60
        logger.info(
            f"文件处理完成，共处理 {processed_count} 个文档片段 | "
            f"总耗时: {total_min:.1f} 分钟 | "
            f"平均速度: {processed_count/max(1, total_min):.1f} 文档/分钟"
        )

        await async_update_file_docment_count(
            id, processed_count, status_enum=FileStatus.COMPLETE
        )
        logger.debug(f"{user_name}文件向量完成....")
    except Exception as e:
        total_time = time.time() - start_time
        logger.error(
            f"文件处理失败 | 已处理 {processed_count} 个片段 | "
            f"耗时: {total_time:.1f} 秒 | 错误: {e}"
        )
        logger.error(f"解析文件【{title}】失败: {e}")
        # Record how far we got before the failure, then surface it.
        await async_update_file_docment_count(
            id, processed_count, status_enum=FileStatus.VECTOR_FAIL
        )
        raise AgentException("INTERNAL_ERROR", f"解析文件【{title}】失败")


def get_file_suffix(original_filename):
    """Return the extension of *original_filename*, including the leading dot.

    @param original_filename: file name or path
    @return: extension such as ".pdf", or "" when there is none
    """
    _, suffix = os.path.splitext(original_filename)
    return suffix


def save_file(file, file_path):
    """Write an uploaded file to disk, creating parent directories as needed.

    @param file: file object exposing a ``chunks()`` iterator of bytes
    @param file_path: destination path
    """
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    with open(file_path, "wb") as out:
        for part in file.chunks():
            out.write(part)


def delete_file(id):
    """Delete a knowledge file: DB record, vector data, and the on-disk file.

    Removed the no-op ``try/except Exception as e: raise e`` wrapper and the
    single-argument ``os.path.join`` — both had no effect.

    @param id: primary key of the file record; a missing record is a no-op
    @raise Exception: when the database delete reports failure
    """
    with transaction.atomic():
        detail = search_knowledge_file_id(id)
        if detail is None:
            return
        file_path = detail["file_path"]
        if delete_knowledge_file(id) < 0:
            raise Exception("删除文件失败")
        # Remove this file's vector entries from redis.
        DELETE_BY_PREFIX(detail["file_index_name"], id)
        # Remove the on-disk file last so a failure rolls the DB delete back.
        os.remove(file_path)


def down_file(id) -> FileResponse:
    """Download a stored knowledge file.

    @param id: primary key of the file record
    @return: streaming FileResponse, or None when the record does not exist
    @raise AgentException: when the file is missing on disk or the download fails
    """
    try:
        with transaction.atomic():
            detail = search_knowledge_file_id(id)
            if detail is None:
                return
            file_path = detail["file_path"]
            file_name = f"{detail['title']}{detail['file_type']}"
            if not os.path.exists(file_path):
                raise AgentException("INTERNAL_ERROR", "文件路径不存在")
            # FileResponse takes ownership of the handle and closes it itself.
            response = FileResponse(open(file_path, "rb"))
            response["Content-Type"] = "application/octet-stream"
            # filename*=UTF-8'' + escape_uri_path lets non-ASCII (e.g. Chinese)
            # names survive the Content-Disposition header.
            response["Content-Disposition"] = (
                "attachment; "
                "filename*=UTF-8''{}".format(escape_uri_path(file_name))
            )
            return response
    except AgentException:
        # Preserve the specific error (e.g. missing file path) instead of
        # masking it with the generic download failure below.
        raise
    except Exception as e:
        # BUG FIX: the original passed ``e`` as a positional logging argument
        # with no %s placeholder, which triggers a logging format error.
        logger.error("文件下载时发生错误: %s", e)
        raise AgentException("INTERNAL_ERROR", "文件下载失败")


def save_knowledge_data(user_name, know_name, index_name, description):
    """Create and persist a knowledge base record.

    Removed the no-op ``try/except Exception as e: raise e`` wrapper.

    @param user_name: owner user name
    @param know_name: knowledge base display name
    @param index_name: vector index name backing this knowledge base
    @param description: free-text description
    @return: whatever ``save_knowledge`` returns for the new record
    """
    with transaction.atomic():
        knowledge = Knowledge(
            user_name=user_name,
            know_name=know_name,
            index_name=index_name,
            description=description,
        )
        return save_knowledge(knowledge)


def update_knowledge_data(id, know_name, index_name, description, user_name):
    """Update a knowledge base and return its refreshed record.

    Removed the no-op ``try/except Exception as e: raise e`` wrapper.

    @param id: primary key of the knowledge base
    @param know_name: new display name
    @param index_name: new vector index name
    @param description: new description
    @param user_name: owner user name (ownership filter for the update)
    @return: the updated knowledge record
    @raise AgentException: when no row was updated
    """
    with transaction.atomic():
        affected = update_knowledge(id, know_name, index_name, description, user_name)
        if affected <= 0:
            raise AgentException("INTERNAL_ERROR", "更新知识库失败")
        return search_knowledge_id(id)


def search_knowledge_data(user_name: str, know_name: str) -> List[Dict[str, Any]]:
    """Search knowledge bases by owner and name.

    @param user_name: owner user name
    @param know_name: knowledge base name filter
    @return: matching knowledge base records
    """
    return search_knowledge(user_name, know_name)


def delete_knowledge_data(user_name, id) -> bool:
    """Delete a knowledge base after verifying it holds no files.

    Removed the no-op ``try/except Exception as e: raise e`` wrapper.

    @param user_name: owner user name
    @param id: primary key of the knowledge base
    @return: True on success
    @raise AgentException: when files still exist or the delete fails
    """
    with transaction.atomic():
        # A knowledge base may only be removed once all its files are gone.
        files = search_knowledge_file(user_name, id, None)
        if files:
            raise AgentException("INTERNAL_ERROR", "请先删除知识库下的文件")
        if delete_knowledge(id) > 0:
            return True
        raise AgentException("INTERNAL_ERROR", "删除知识库失败")


def knowledge_retrieve(know_id, input, accuracy) -> str:
    """Retrieve relevant context from a knowledge base and build a prompt.

    @param know_id: knowledge base id
    @param input: user query text
    @param accuracy: match accuracy; converted to a redis distance threshold
                     of ``1 - accuracy`` (higher accuracy => stricter match)
    @return: the filled-in knowledge prompt, or None when nothing matched
    @raise AgentException: when the knowledge base is missing or retrieval fails
    """
    try:
        # Resolve the knowledge base and its vector index.
        detail = search_knowledge_id(know_id)
        if detail is None:
            raise AgentException("INTERNAL_ERROR", "知识库不存在")
        index_name = detail["index_name"]

        results = REDIS_SEARCH_TEXT(index_name, input, distance_threshold=1 - accuracy)
        if results is None:
            return None

        # Config lookup is loop-invariant; fetch it once instead of per hit.
        server_address = GET_CONFIG().get("server_address", "")
        return_list = []
        for item in results:
            if item["metadata"] is None:
                return_list.append(item["content"])
            else:
                # Attach a download link so the model can cite the source file.
                file_http = f"{server_address}/knowledge/down?id={item['metadata'].get('file_id', '')}"
                return_list.append(f"{item['content']}\nFileSource:{file_http}")

        # BUG FIX: with ``import datetime`` the module has no ``today``;
        # the class must be qualified (datetime.datetime.today()).
        today = datetime.datetime.today().strftime("%Y-%m-%d")
        real_input = (
            KNOWLEDGE_PROMPT_TEMPLATE.replace("{current_date}", today)
            .replace("{query_str}", input)
            .replace("{context_str}", "\n\n".join(return_list))
        )

        return real_input
    except AgentException:
        # Keep the specific error (e.g. 知识库不存在) instead of masking it.
        raise
    except Exception as e:
        logger.error(f"知识库检索失败: {e}")
        raise AgentException("INTERNAL_ERROR", "知识库检索失败")
