import os
import bs4
import uuid
from ..dao import document_dao
from src.common import commonUtils
from src.common.config import config
from src.common.logger import getLogger
from src.agentic.config.GraphStore import GraphStore
from src.agentic.config.VectorStore import VectorStore
from langchain_core.output_parsers import StrOutputParser
from src.agentic.config.LanguageModel import LanguageModel
from src.agentic.config.EmbeddingModel import EmbeddingModel
from langchain_community.document_loaders import WebBaseLoader

# Module-wide logger shared by every service function in this file.
logger = getLogger()

def select_document_list(args):
    """Return the list of documents matching *args* (straight DAO delegation)."""
    rows = document_dao.select_document_list(args)
    return rows

def select_document_page(args):
    """Return one page of document rows for *args*.

    Fix: the result was previously bound to a local named ``list``,
    shadowing the builtin; renamed to ``page``.
    """
    page = document_dao.select_document_page(args)
    logger.info(f"select_document_page list len: {len(page)}")
    return page

def select_document_library(args):
    """Look up a single document row by its library number via the DAO."""
    return document_dao.select_document_by_number(args)

def upload_document(file):
    """Ingest an uploaded document end-to-end.

    Extracts the text, indexes it into the vector and graph stores, records a
    Document row, then persists the raw upload to disk.

    Args:
        file: file-like upload object exposing ``filename``, ``read``,
              ``seek`` and ``tell``.
    """
    filename = file.filename
    # Fix: the original f-string had no placeholder and logged a literal
    # "(unknown)" instead of the actual filename.
    logger.info(f"upload_document file filename: {filename}")

    # Stable library number derived from the file name.
    library_number = commonUtils.string_to_hash(filename, "md5", 16)

    content = extract_document_content("Document", file)

    collection_name = insert_document_vector(library_number, content)
    logger.info(f"upload_document insert_document_vector finished")

    insert_document_graph(collection_name, content)
    logger.info(f"upload_document insert_document_graph finished")

    # Measure the upload size without consuming the stream.
    file.seek(0, 2)
    size_bytes = file.tell()
    file.seek(0)
    # Size is stored in MB, falling back to KB for files under 1 MB.
    # Fix: the original did ``file_size % 1024`` on the already-floored MB
    # value, which is always 0 — a KB fallback was presumably intended.
    file_size = size_bytes // 1024 // 1024
    if file_size == 0:
        file_size = size_bytes // 1024
    file_type = filename.split(".")[-1]
    insert_document_record(filename, file_type, file_size, library_number, content)
    logger.info(f"upload_document insert_document_record finished")

    save_path = "D:/Downloads/taixu/documents"
    logger.info(f"upload_document save_path: {save_path}")
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(save_path, exist_ok=True)

    with open(os.path.join(save_path, filename), 'wb') as f:
        f.write(file.read())
    logger.info(f"upload_document document save finished")

def insert_document_record(filename, file_type, file_size, library_number, content):
    """Persist a new Document row describing an ingested file or website."""
    # NOTE: local import, presumably to avoid a circular dependency — confirm.
    from ..model.document_model import Document

    row = Document()
    row.id = uuid.uuid4()
    row.document_name = filename
    row.document_type = file_type
    row.document_size = file_size
    row.library_number = library_number
    row.document_summary = extract_content_summary(content)
    document_dao.insert_row(row)

def delete_document_record(args):
    """Delete documents by id.

    For each id: drop the document's vector collection, delete its graph
    nodes (labels prefixed with the collection name), then delete the DB row.

    Args:
        args: dict with an "ids" key holding a comma-separated id string.
    """
    # NOTE: local import, presumably to avoid a circular dependency — confirm.
    from ..model.document_model import Document

    config_dict = config.parse_config_key(["qdrant"])
    collection_prefix = config_dict.get("collection_prefix", "")
    logger.info(f"delete_document_record collection_prefix: {collection_prefix}")

    vector_client = VectorStore().new_vector_client()
    graph_client = GraphStore().new_graph_store()

    # Fix: loop variable renamed from ``id`` (shadowed the builtin).
    for doc_id in args.get("ids").split(","):
        document = Document.query.get(doc_id)

        collection_name = collection_prefix + document.library_number
        logger.info(f"delete_document_record collection_name: {collection_name}")
        collection_exist = vector_client.collection_exists(collection_name)
        logger.info(f"delete_document_record collection_exist: {collection_exist}")
        if collection_exist:
            # NOTE(review): if this is a raw qdrant client, removing a whole
            # collection is ``delete_collection``; ``delete`` removes points
            # and requires a selector — verify against VectorStore.
            vector_client.delete(collection_name)

        response = graph_client.query(
            f"""
                MATCH (n)
                WHERE any(lbl IN labels(n) WHERE lbl STARTS WITH "{collection_name}")
                RETURN count(n) AS node_count
            """
        )
        logger.info(f"delete_document_record graph_client query response: {response}")
        node_count = response[0]["node_count"]
        if node_count > 0:
            # Fixes vs the original:
            #  1. ``str.format`` crashed here: the Cypher contains literal
            #     "{}" and "{batchSize: 1000, parallel: false}" braces that
            #     format() treats as replacement fields. Substitute via
            #     str.replace on a brace-free placeholder instead.
            #  2. The substituted prefix was ``collection_prefix``, which
            #     matches EVERY document's labels; it must be
            #     ``collection_name`` so only this document's nodes are
            #     deleted (the count query above filters on collection_name).
            delete_cypher = """
                CALL apoc.periodic.iterate(
                "CALL db.labels() YIELD label 
                WHERE label STARTS WITH '__NODE_PREFIX__' 
                WITH collect(label) AS labels 
                UNWIND labels AS l
                CALL apoc.cypher.run('MATCH (n:`' + l + '`) RETURN n', {}) YIELD value
                RETURN value.n AS node",
                "DETACH DELETE node",
                {batchSize: 1000, parallel: false}
                )
                YIELD batches, total, timeTaken
                RETURN batches, total, timeTaken
            """.replace("__NODE_PREFIX__", collection_name)
            response = graph_client.query(delete_cypher)
            logger.info(f"delete_document_record graph_client delete response: {response}")

        document_dao.delete_row(document)

def upload_website(website):
    """Ingest a website URL: crawl its text, index it into the vector and
    graph stores, and record it as an "html" document of size 0."""
    lib_no = commonUtils.string_to_hash(website, "md5", 16)
    logger.info(f"upload_website library_number: {lib_no}")

    page_text = extract_document_content("Website", website)
    logger.info(f"upload_website content len: {len(page_text)}")

    collection = insert_document_vector(lib_no, page_text)
    logger.info(f"upload_website insert_document_vector finished")

    insert_document_graph(collection, page_text)
    logger.info(f"upload_website insert_document_graph finished")

    insert_document_record(website, "html", 0, lib_no, page_text)
    logger.info(f"upload_website insert_document_record finished")

def extract_document_content(document_type, document):
    """Extract plain text from an uploaded file or a website.

    Args:
        document_type: "Document" for a file-like upload; any other value is
            treated as a website URL to crawl.
        document: the file object or the URL string.

    Returns:
        The extracted text as a single string.
    """
    if document_type == "Document":
        return commonUtils.extract_document_content(document)

    web_loader = WebBaseLoader(
        web_paths = (document,),
        bs_kwargs = dict(parse_only = bs4.SoupStrainer())
    )
    web_docs = web_loader.load()
    logger.info(f"extract_document_content web_docs len: {len(web_docs)}")

    # join() instead of repeated += (avoids quadratic string concatenation).
    return "".join(doc.page_content for doc in web_docs)

def extract_content_summary(content, model_name = "qwen3:4b", base_url = "http://localhost:11434"):
    """Summarize *content* with a local LLM.

    Generalized: the model name and endpoint were hard-coded; they are now
    defaulted keyword parameters (backward compatible — existing callers pass
    only *content*).

    Args:
        content: the text to summarize.
        model_name: LLM model identifier.
        base_url: LLM service endpoint.

    Returns:
        The generated summary string.
    """
    logger.info(f"extract_content_summary content len: {len(content)}")
    llm_model = LanguageModel(model_name, base_url, None)
    llm = llm_model.new_llm_model()

    template = """
        请总结概括以下文本内容：
        {content}
        最终的回答长度不超过1000字，且不能返回字数。
    """
    prompt = template.format(content = content)
    summary = (llm | StrOutputParser()).invoke(prompt)
    logger.info(f"extract_content_summary summary len: {len(summary)}")
    return summary

def insert_document_vector(library_number, content, model_name = "bge-m3", base_url = "http://localhost:11434"):
    """Embed *content* and store it in the vector store.

    Generalized: the embedding model and endpoint were hard-coded; they are
    now defaulted keyword parameters (backward compatible).

    Args:
        library_number: document hash used to build the collection name.
        content: the text to embed and store.
        model_name: embedding model identifier.
        base_url: embedding service endpoint.

    Returns:
        The collection name the content was stored under.
    """
    logger.info(f"insert_document_vector library_number: {library_number}")
    embed_model = EmbeddingModel(model_name, base_url)
    embedding = embed_model.new_embed_model()

    config_dict = config.parse_config_key(["qdrant"])
    collection_prefix = config_dict.get("collection_prefix", "")
    logger.info(f"insert_document_vector collection_prefix: {collection_prefix}")

    collection_name = collection_prefix + library_number
    logger.info(f"insert_document_vector collection_name: {collection_name}")
    VectorStore().add_document_vector(embedding, collection_name, content)
    logger.info(f"insert_document_vector vector_store {collection_name} finished")
    return collection_name

def insert_document_graph(collection_name, content, model_name = "deepseek-v3.1:671b-cloud", base_url = "http://localhost:11434"):
    """Extract a knowledge graph from *content* and store it under
    *collection_name*-prefixed labels.

    Generalized: the LLM model and endpoint were hard-coded; they are now
    defaulted keyword parameters (backward compatible), consistent with
    extract_content_summary and insert_document_vector.

    Args:
        collection_name: label prefix for the graph nodes.
        content: the text to extract entities/relations from.
        model_name: LLM model identifier used for graph extraction.
        base_url: LLM service endpoint.
    """
    logger.info(f"insert_document_graph collection_name: {collection_name}")

    llm_model = LanguageModel(model_name, base_url, None)
    llm = llm_model.new_llm_model()

    GraphStore().add_document_graph(llm, collection_name, content)
    logger.info(f"insert_document_graph prefix label {collection_name} finished")

def preview_document(args):
    """Return the previewable text content of a stored document.

    Args:
        args: dict with "documentName" and "documentType" keys (request-
            supplied).

    Returns:
        The extracted content string.

    Raises:
        ValueError: if the document name would resolve outside the storage
            directory (path traversal).
    """
    save_path = "D:/Downloads/taixu/documents"
    logger.info(f"preview_document save_path: {save_path}")

    file_name = args.get("documentName")
    if args.get("documentType") == "html":
        # For websites the stored "name" is the original URL — re-crawl it.
        return extract_document_content("Website", file_name)

    # Security fix: documentName comes from the request; reject names that
    # escape the storage directory (e.g. "../../secret").
    file_path = os.path.abspath(os.path.join(save_path, file_name))
    if not file_path.startswith(os.path.abspath(save_path) + os.sep):
        raise ValueError(f"invalid document name: {file_name}")

    with open(file_path, "rb") as file:
        # extract_document_content expects a .filename attribute; IOBase
        # instances accept arbitrary attributes, so attach it here.
        file.filename = file_name
        return extract_document_content("Document", file)
