import json
import os
from datetime import datetime
from typing import List
from fastapi import UploadFile, File
from langchain_community.chat_models import ChatOpenAI
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter, TokenTextSplitter

import config
from config import project_root
from tables.user import UserModel
from tables.file import Files
from tables.vector import Vectors
from utils.main import doc_loader, merge_doc
from fastapi.concurrency import run_in_threadpool

async def upload(files: List[UploadFile] = File(...), rag_id: str = "", current_user: UserModel = None):
    """
    File upload endpoint; supports single- and multi-file uploads.

    Each file is written under data/user_files/<username>/<rag_id>/ and a
    metadata row is inserted into the Files table.

    Args:
        files: Uploaded file(s) from the multipart request.
        rag_id: Identifier of the RAG knowledge base the files belong to.
        current_user: Authenticated user; username and id are read.
    """
    # The target directory is identical for every file in the batch, so
    # build and create it once before the loop. exist_ok=True avoids the
    # check-then-create race of exists() + makedirs().
    user_upload_dir = os.path.join(project_root, "data", "user_files", current_user.username, rag_id)
    os.makedirs(user_upload_dir, exist_ok=True)

    for file in files:
        # basename() strips any client-supplied directory components so an
        # uploaded name like "../../evil" cannot escape the upload directory.
        safe_name = os.path.basename(file.filename)
        file_path = os.path.join(user_upload_dir, safe_name)

        # Persist the uploaded bytes to disk.
        content = await file.read()
        with open(file_path, "wb") as f:
            f.write(content)

        # Format as yyyy-MM-dd HH:mm:ss
        formatted_date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Record the file's metadata in the database.
        Files.insert_new_file(current_user.id, safe_name, rag_id, file_path, formatted_date_time)


def delete_local_file(file_path: str) -> bool:
    """
    Delete a file from the local filesystem.

    Args:
        file_path: Path of the file to remove.

    Returns:
        True if the file was removed; False on any OS-level failure
        (missing file, permission denied, path is a directory, ...).
    """
    try:
        os.remove(file_path)
        return True
    except OSError as e:
        # os.remove only raises OSError subclasses; keeping the catch
        # narrow avoids silently swallowing programming errors.
        print(f"删除文件失败: {str(e)}")
        return False


async def file_to_chunks(
        collection_name: str,
        userid: int,
        filepath: str,
        rag_id: str,
        chunk_type: int,
        embed: str,
        llm: str
):
    """
    Parse a file from disk, split it into vector-store chunks, then mark the
    file as chunked in the database.

    Args:
        collection_name: Target vector collection.
        userid: Owner user id.
        filepath: Path of the file to parse.
        rag_id: RAG knowledge-base identifier.
        chunk_type: Chunking strategy flag, forwarded to to_chunks.
        embed: Embedding model name.
        llm: Chat model name used for contextual chunking.
    """
    base_name = os.path.basename(filepath)

    # Load the file and merge its parts into a single document.
    parsed_docs = await doc_loader(filepath)
    merged = await run_in_threadpool(merge_doc, parsed_docs)

    # Split, embed, and store the chunks.
    await to_chunks(
        rag_id=rag_id,
        userid=userid,
        collection_name=collection_name,
        filename=base_name,
        chunk_type=chunk_type,
        embed=embed,
        llm=llm,
        document=merged
    )

    # Update the file's chunking status flag.
    await run_in_threadpool(
        Files.update_file_chunk_status,
        userid,
        base_name,
        rag_id,
        True
    )


async def to_chunks(
        collection_name: str,
        rag_id: str,
        userid: int,
        filename: str,
        chunk_type: int,
        embed: str,
        llm: str,
        document: Document,
):
    """
    Split a document into chunks, write them to the vector store, and report
    progress to the Files table.

    Args:
        collection_name: Target vector collection.
        rag_id: RAG knowledge-base identifier the file belongs to.
        userid: Owner user id; progress rows are keyed by user/file/rag.
        filename: Source file name stored alongside each chunk.
        chunk_type: 1 = default recursive splitting; any other value =
            contextual splitting, where each chunk also gets an LLM-generated
            description of its place in the full text.
        embed: Embedding model name; selects chunk size/overlap.
        llm: Chat model name used to generate contextual descriptions.
        document: The merged document to split.

    Raises:
        Exception: Re-raised (with the original chained as __cause__) after
            resetting progress and deleting any chunks already written.
    """
    # Coarse splitter used only by contextual chunking: each "full" piece is
    # the context window the LLM sees when describing a sub-chunk.
    full_text_splitter = TokenTextSplitter(
        chunk_size=100000,
        chunk_overlap=10000,
    )

    # Chunk length depends on the embedding model (only one local embed model
    # is wired up for now; move these numbers to the database later).
    if embed == "conan-embedding":
        chunk_size = 500
        overlap = 100
    else:
        chunk_size = 1000
        overlap = 200

    text_splitter = RecursiveCharacterTextSplitter(
        separators=[""],
        chunk_size=chunk_size,
        chunk_overlap=overlap,
        keep_separator=True,
    )
    # chunk_type: 1 = default chunking, otherwise contextual chunking
    progress = 0
    if chunk_type == 1:
        documents = [
            {
                "text": doc.page_content,
                "description": "",
            } for doc in text_splitter.split_documents([document])
        ]
        weight = 1
    else:
        documents = []
        # Build the chat model once: it is loop-invariant, and constructing
        # it per sub-chunk wasted work on every iteration.
        llm_chain = ChatOpenAI(
            model=llm,
            api_key=config.model["api_key"],
            base_url=config.model["api_base"],
            temperature=0.2,
        )
        full_docs = full_text_splitter.split_documents([document])
        for i, full_doc in enumerate(full_docs):
            sen_doc = text_splitter.split_documents([full_doc])
            for j, sen in enumerate(sen_doc):
                # Ask the LLM to summarize this chunk's role within the full text.
                response = llm_chain.invoke(
                    config.contextual_prompt.format(
                        WHOLE_DOCUMENT=full_doc.page_content,
                        CHUNK_CONTENT=sen.page_content
                    )
                )
                documents.append({
                    "text": sen.page_content,
                    "description": response.content,
                })
                progress = i / len(full_docs) + (j + 1) / (len(sen_doc) * len(full_docs))
                # Persist progress so the UI can poll it during the slow LLM phase.
                await Files.update_file_chunk_progress(userid=userid, filename=filename, rag_id=rag_id, progress=progress)
        weight = 0.05
    # Renamed from chunk_size to avoid shadowing the splitter setting above.
    chunk_count = 0
    # Contextual chunking is expensive; retry support is still TODO.
    try:
        for i, doc in enumerate(documents):
            await Vectors.create_chunk(
                collection_name=collection_name,
                filename=filename,
                text=doc["text"],
                description=doc["description"],
                embed=embed,
                num=i + 1,
            )
            chunk_count = i + 1
            # Persist write progress (scaled to percent).
            _progress = (progress + (i + 1) / len(documents) * weight) * 100
            await Files.update_file_chunk_progress(userid=userid, filename=filename, rag_id=rag_id, progress=_progress)
        # NOTE(review): chunk_count + 1 stores one more than the number of chunks
        # actually written — confirm the +1 is intended by the callee.
        await Files.update_file_chunk_size_progress(chunk_count + 1, userid=userid, filename=filename, rag_id=rag_id)
    except Exception as e:
        # Roll back: reset progress and delete any chunks already written.
        await Files.update_file_chunk_progress(userid=userid, filename=filename, rag_id=rag_id, progress=0.0)
        await Vectors.delete_chunks(collection_name=collection_name, source=filename)
        # Chain the original exception so its traceback is not lost.
        raise Exception(str(e)) from e
