# -*- coding: utf-8 -*-
# @File:     custom_loaders.py
# @Author:
# @DateTime: 2025/10/21/15:17

import tempfile
import os
from langchain_community.document_loaders import UnstructuredMarkdownLoader, TextLoader, UnstructuredWordDocumentLoader, UnstructuredHTMLLoader, \
    JSONLoader, CSVLoader, UnstructuredPDFLoader, UnstructuredExcelLoader, UnstructuredPowerPointLoader
from langchain_community.document_loaders.base import BaseLoader
from langchain_core.documents import Document
from pathlib import Path
import json
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, Optional, Union, AsyncIterator

from .parse_file_data import process_file_to_markdown


class CustomJSONLoader(JSONLoader):
    """``JSONLoader`` variant that lets the caller choose the file encoding.

    The upstream loader reads the file without an explicit encoding; this
    subclass stores one (defaulting to ``utf-8-sig`` so a leading BOM is
    stripped transparently) and applies it in :meth:`lazy_load`.
    """

    def __init__(
        self,
        file_path: Union[str, PathLike],
        jq_schema: str,
        content_key: Optional[str] = None,
        is_content_key_jq_parsable: Optional[bool] = False,
        metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
        text_content: bool = True,
        json_lines: bool = False,
        encoding: Optional[str] = None,
    ):
        # utf-8-sig also decodes plain UTF-8, so it is a safe default.
        self.encoding = "utf-8-sig" if encoding is None else encoding
        super().__init__(
            file_path,
            jq_schema,
            content_key,
            is_content_key_jq_parsable,
            metadata_func,
            text_content,
            json_lines,
        )

    def lazy_load(self) -> Iterator[Document]:
        """Yield documents parsed from the JSON (or JSON Lines) file."""
        doc_index = 0
        if self._json_lines:
            # JSON Lines: one JSON value per physical line; blanks are skipped.
            with self.file_path.open(encoding=self.encoding) as handle:
                for raw_line in handle:
                    stripped = raw_line.strip()
                    if not stripped:
                        continue
                    for document in self._parse(stripped, doc_index):
                        yield document
                        doc_index += 1
        else:
            content = self.file_path.read_text(encoding=self.encoding)
            for document in self._parse(content, doc_index):
                yield document
                doc_index += 1


def file_with_loaders_documents_old(file_bytes: bytes, file_org_name: str):
    """Load raw file bytes into LangChain documents via a type-specific loader.

    The bytes are written to a named temporary file (the loaders all require
    a real path on disk), loaded, and the temp file is removed afterwards —
    even when loading fails.

    Args:
        file_bytes: Raw content of the file to load.
        file_org_name: Original filename; its suffix selects the loader and
            it becomes each document's ``source`` metadata.

    Returns:
        List of ``Document`` objects whose metadata is reset to exactly
        ``{'source': file_org_name}``.

    Raises:
        ValueError: If the filename suffix is not a supported type.
    """
    file_type = Path(file_org_name).suffix.lower()
    # NamedTemporaryFile (not TemporaryFile, which has no `delete` parameter
    # and no on-disk name on POSIX) guarantees a usable path. Keep the
    # original suffix so loaders that sniff by extension still work.
    with tempfile.NamedTemporaryFile(mode='wb', suffix=file_type, delete=False) as tmp_file:
        tmp_file.write(file_bytes)
        file_name = tmp_file.name
    try:
        if file_type == '.txt':
            loader = TextLoader(file_name, encoding='utf-8')
        elif file_type == '.md':
            loader = UnstructuredMarkdownLoader(file_name)
        elif file_type in ('.docx', '.doc'):
            loader = UnstructuredWordDocumentLoader(file_name)
        elif file_type in ('.html', '.htm'):
            loader = UnstructuredHTMLLoader(file_name)
        elif file_type == '.json':
            loader = CustomJSONLoader(file_name, jq_schema=".", text_content=False, encoding='utf-8')
        elif file_type == '.csv':
            loader = CSVLoader(file_name)
        elif file_type == '.pdf':
            loader = UnstructuredPDFLoader(file_name)
        elif file_type in ('.xls', '.xlsx'):
            loader = UnstructuredExcelLoader(file_name)
        elif file_type in ('.ppt', '.pptx'):
            loader = UnstructuredPowerPointLoader(file_name)
        else:
            raise ValueError(f"不支持的文件类型: {file_type}")
        docs = loader.load()
    finally:
        # Best-effort cleanup on every exit path; a missing or locked
        # temp file is not worth failing the whole load over.
        try:
            os.remove(file_name)
        except OSError:
            pass
    documents = []
    for doc in docs:
        # Discard loader-specific metadata; keep only the original name.
        doc.metadata = {'source': file_org_name}
        documents.append(doc)
    return documents


class CustomStringLoader(BaseLoader):
    """Loader that exposes an in-memory string as a single ``Document``."""

    def __init__(self, text):
        # Raw text that becomes the document body; metadata stays empty.
        self.text = text

    def lazy_load(self) -> Iterator[Document]:
        """Yield exactly one document wrapping the stored text."""
        yield Document(page_content=self.text, metadata={})

    async def alazy_load(self) -> AsyncIterator[Document]:
        """Async counterpart of :meth:`lazy_load`; same single document."""
        for document in self.lazy_load():
            yield document


def file_with_loaders_documents(file_bytes: bytes, file_org_name: str):
    """Convert file bytes to markdown and wrap the result as documents.

    Args:
        file_bytes: Raw content of the file.
        file_org_name: Original filename, stored as each document's
            ``source`` metadata.

    Returns:
        List of ``Document`` objects with metadata ``{'source': file_org_name}``.
    """
    markdown_text = process_file_to_markdown(file_bytes, file_org_name)
    documents = CustomStringLoader(markdown_text).load()
    for document in documents:
        # Replace any loader-provided metadata with just the source name.
        document.metadata = {'source': file_org_name}
    return documents

