import os
from datetime import datetime
from urllib.parse import urlparse

import pandas as pd
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from tqdm import tqdm

from langchain_community.document_loaders import UnstructuredURLLoader
from langchain_core.documents import Document

from utils.a_href_spider import Spider
from db.crud import get_object, update_object, create_object, create_collection, delete_collection, delete_object_by_field
from utils.data_loader import real_url, find_files, doc_loader, update_knowledge
from utils.log import logger_err
from utils.tools import config, create_vector

# Initialize the async scheduler that drives the cron jobs registered below.
scheduler = AsyncIOScheduler()


# Scheduled task: crawl site links and load newly discovered pages into the knowledge base.
def web_load():
    """Crawl links via Spider, resolve and deduplicate them, load pages that
    have not been seen before, and push the resulting documents into the
    knowledge base.

    URL bookkeeping is persisted in two weaviate objects:
      * ``all_url_class`` - every raw link the spider has ever returned
      * ``dis_url_class`` - distinct resolved (canonical) URLs already loaded
    """
    collection_name = config["weaviate"]["source_collection"]["name"]
    all_url_class = config["weaviate"]["source_collection"]["all_url_class"]
    dis_url_class = config["weaviate"]["source_collection"]["dis_url_class"]
    doc_list = []
    dis_url = []
    url_list = Spider().get_link()
    try:
        old_url_list = get_object(collection_name=collection_name, class_name=all_url_class).properties["urls"]
    except Exception:
        # Object missing (first run) or lookup failed -> treat as first run.
        old_url_list = None

    if old_url_list is None:
        # First run: persist the full raw URL list, then load every page.
        create_object(collection_name=collection_name, class_name=all_url_class, properties={"urls": url_list})
        for url in tqdm(url_list):
            if urlparse(url).fragment:
                continue  # in-page anchors duplicate their base page
            _load_url(url, dis_url, doc_list)
        create_object(collection_name=collection_name, class_name=dis_url_class, properties={"urls": dis_url})
    else:
        # Incremental run: only process links not recorded in the raw list.
        dis_url = get_object(collection_name=collection_name, class_name=dis_url_class).properties["urls"]
        for url in tqdm(url_list):
            if url in old_url_list or urlparse(url).fragment:
                continue
            old_url_list.append(url)
            _load_url(url, dis_url, doc_list)
        update_object(collection_name=collection_name, class_name=all_url_class, properties={"urls": old_url_list})
        update_object(collection_name=collection_name, class_name=dis_url_class, properties={"urls": dis_url})
    # Push the newly loaded documents into the knowledge base.
    update_knowledge(doc_list)


# Resolve a raw URL; if its canonical form is new, load the page, stamp
# title/url metadata, and collect the merged document (mutates dis_url
# and doc_list in place).
def _load_url(url, dis_url, doc_list):
    try:
        url, title = real_url(url)
        if url in dis_url:
            return
        dis_url.append(url)
        doc = merge_doc(UnstructuredURLLoader([url]).load())
        doc.metadata["title"] = title
        doc.metadata["url"] = url
        doc_list.append(doc)
    except Exception as e:
        # Best effort: one failing page must not abort the whole crawl.
        logger_err.error(e)

# Scheduled task: load new local document files into the knowledge base.
def doc_load():
    """Scan the configured data directory, load files not seen before,
    and push them into the knowledge base.

    The list of already-loaded file basenames is persisted in weaviate under
    ``file_class`` so each file is ingested exactly once across runs.
    """
    collection_name = config["weaviate"]["source_collection"]["name"]
    file_class = config["weaviate"]["source_collection"]["file_class"]
    # Resolve the data directory relative to the project root (one level
    # above the directory containing this script).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(current_dir)
    data_path = os.path.join(project_root, config["weaviate"]["source_collection"]["file_path"])
    doc_list = []
    file_list = find_files(data_path)

    # Fetch the load history; a missing object means this is the first run.
    try:
        old_file_list = get_object(collection_name=collection_name, class_name=file_class).properties["files"]
    except Exception:
        old_file_list = None

    if old_file_list is None:
        # First run: create the history object EMPTY. (Creating it pre-filled
        # with all basenames would permanently skip files if this run crashed
        # before the final update_object below.)
        create_object(collection_name=collection_name, class_name=file_class, properties={"files": []})
        old_file_list = []

    for file in tqdm(file_list):
        basename = os.path.basename(file)
        if basename in old_file_list:
            continue
        try:
            doc = merge_doc(doc_loader(file))
            doc.metadata["title"] = basename
            doc.metadata["url"] = " "  # placeholder: local files have no URL
            doc_list.append(doc)
            # Only mark as loaded after a successful load, so failures retry.
            old_file_list.append(basename)
        except Exception as e:
            # Best effort: one bad file must not abort the batch.
            logger_err.error(e)

    # Persist the updated load history and push new docs to the knowledge base.
    update_object(collection_name=collection_name, class_name=file_class, properties={"files": old_file_list})
    update_knowledge(doc_list)


# Merge a list of loaded documents into a single Document.
def merge_doc(documents: list[Document]):
    """Concatenate *documents* into one Document.

    Page contents are joined with newlines and the first document's metadata
    is kept. A single-element list is returned unchanged.

    Raises:
        ValueError: if *documents* is empty (previously surfaced as an
            opaque IndexError from ``documents[0]``).
    """
    if not documents:
        raise ValueError("merge_doc() requires at least one document")
    if len(documents) == 1:
        return documents[0]
    text = "\n".join(doc.page_content for doc in documents)
    return Document(page_content=text, metadata=documents[0].metadata)

# Import the preset questions into the vector store.
def pre_question_load():
    """Rebuild the preset-question collection from the configured Excel files."""
    collection_name = config["weaviate"]["pre_query_collection"]["name"]
    # Drop and recreate so the collection always mirrors the files exactly.
    delete_collection(collection_name)
    create_collection(collection_name)

    # Data directory lives under the project root (one level above this script).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(current_dir)
    data_path = os.path.join(project_root, config["weaviate"]["pre_query_collection"]["file_path"])

    # Read each spreadsheet and insert one vectorized object per row.
    for file_path in find_files(data_path):
        df = pd.read_excel(file_path)
        for row in tqdm(df.itertuples(index=True, name='Person')):
            if row[1] == "序号":
                # Skip a repeated header row embedded in the sheet.
                continue
            create_object(
                collection_name=collection_name,
                properties={"query": row[2], "text": row[3], "url": "其他", "title": "预设问题"},
                vector=create_vector(row[2]),
            )
    print(f"预设问题更新成功!{datetime.now()}")


# Refresh the video list in the vector store.
def video_load():
    """Replace all video objects in the collection with rows read from the
    configured Excel files."""
    collection_name = config["weaviate"]["video_collection"]["name"]
    # Remove previously imported video objects before re-importing.
    delete_object_by_field(collection_name=collection_name, field_name="source", field_value="video")

    # Data directory lives under the project root (one level above this script).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(current_dir)
    data_path = os.path.join(project_root, config["weaviate"]["video_collection"]["file_path"])

    # Read each spreadsheet and insert one vectorized object per row.
    for file_path in find_files(data_path):
        df = pd.read_excel(file_path)
        for row in tqdm(df.itertuples(index=True, name='Person')):
            if row[1] == "url":
                # Skip a repeated header row embedded in the sheet.
                continue
            create_object(
                collection_name=collection_name,
                properties={"text": row[3], "url": row[1], "title": row[2], "source": "video"},
                vector=create_vector(row[3]),
            )
    print(f"视频列表更新成功!{datetime.now()}")

# Register the nightly update job with the scheduler.
@scheduler.scheduled_job("cron", id="rag_update", hour=1)
def rag_update_task():
    """Cron job at 01:00: refresh web pages, preset questions, local
    documents and the video list, in that order."""
    web_load()
    pre_question_load()
    doc_load()
    video_load()

