import os
import glob
from datetime import datetime

import chardet
import requests
from bs4 import BeautifulSoup
from langchain_community.document_loaders import (
    PyMuPDFLoader,
    PyPDFLoader,
    TextLoader,
    Docx2txtLoader,
    CSVLoader,
    UnstructuredPowerPointLoader,
    DedocFileLoader
)
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tqdm import tqdm

from db.crud import create_objects
from utils.tools import config


# Collect files under a directory tree
def find_files(directory: str, extension=None):
    """
    Walk *directory* recursively and return the paths of every regular file.

    When *extension* is given (without the leading dot), only files whose
    name ends with that extension are returned.
    """
    # Build the glob pattern once: everything, or only "*.<ext>".
    suffix = f".{extension}" if extension else ""
    pattern = f"{directory}/**/*{suffix}"

    # `**` with recursive=True descends into subdirectories; filter out
    # the directory entries glob also yields.
    return [path for path in glob.iglob(pattern, recursive=True)
            if os.path.isfile(path)]


# Document loading
def doc_loader(file_path: str):
    """
    Load *file_path* into a list of langchain ``Document`` objects, choosing
    a loader based on the file extension.

    Supported extensions: .pdf (PyPDF, falling back to PyMuPDF on error),
    .txt, .doc, .docx, .csv, .xlsx/.xls, .pptx. Any other extension is read
    as plain text with a chardet-detected encoding.
    """
    file_ext = os.path.splitext(file_path)[1].lower()
    if file_ext == ".pdf":
        try:
            return PyPDFLoader(file_path).load()
        except Exception as e:
            # PyPDF occasionally fails on malformed PDFs; retry with PyMuPDF.
            print(f"Error loading PDF file: {e}")
            return PyMuPDFLoader(file_path).load()
    if file_ext == ".txt":
        return TextLoader(file_path).load()
    if file_ext == ".doc":
        return DedocFileLoader(file_path).load()
    if file_ext == ".docx":
        return Docx2txtLoader(file_path).load()
    if file_ext == ".csv":
        return CSVLoader(file_path).load()
    if file_ext in (".xlsx", ".xls"):
        return DedocFileLoader(file_path).load()
    if file_ext == ".pptx":
        return UnstructuredPowerPointLoader(file_path).load()

    # Unknown extension: sniff the encoding, then read as plain text.
    with open(file_path, 'rb') as file:
        raw_data = file.read()

    # chardet returns encoding=None when it cannot decide (e.g. empty or
    # binary content); fall back to UTF-8 instead of letting open() pick
    # the platform locale encoding.
    encoding = chardet.detect(raw_data)['encoding'] or 'utf-8'

    with open(file_path, 'r', encoding=encoding, errors='ignore') as f:
        return [Document(page_content=f.read(), metadata={"source": file_path})]


# Resolve a link
def real_url(url: str):
    """
    Fetch *url* (following redirects) and return a tuple of
    (final URL with any trailing slash removed, page title or None).

    Raises ``requests.RequestException`` on network errors or timeout.
    """
    # A timeout prevents the call from hanging forever on a dead host.
    response = requests.get(url, timeout=15)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Pages without a <title> tag give soup.title == None; guard so we
    # return None instead of raising AttributeError.
    title = soup.title.string if soup.title is not None else None
    # removesuffix strips at most one trailing "/", matching the original
    # endswith/slice behavior.
    return response.url.removesuffix("/"), title


# Refresh the knowledge base
def update_knowledge(documents: list[Document], index_name: str = config["weaviate"]["search_collection"]["name"]):
    """
    Split *documents* into overlapping chunks and store them in the
    Weaviate collection named *index_name*.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=512,
        chunk_overlap=100,
        separators=["\n\n", "\n", "。"],
    )
    chunks = splitter.split_documents(documents)
    create_objects(collection_name=index_name, objects=chunks)
    print(f"知识库更新成功!{datetime.now()}")
