import re
import os
import asyncio

from typing import List, Tuple

from langchain.text_splitter import TextSplitter
from utils import _save, _load, get_now, get_text_content
from consts import (
    _FILE_SPLIT,
    _FILE_META,
    _FILE_META_ERR
)

from load.prompt import prompt_extract_meta
from tqdm.asyncio import tqdm as tqdm_async
from load.entity import News

from llm import LLM
from loguru import logger

class TaggedTextSplitter(TextSplitter):
    """Split raw text into segments delimited by 稿件Start / 稿件End markers.

    Every span of text enclosed between the literal markers ``稿件Start`` and
    ``稿件End`` becomes one segment (stripped of surrounding whitespace); any
    text outside the markers is discarded.
    """

    def __init__(self, **kwargs):
        """Initialize the splitter.

        :param kwargs: forwarded unchanged to :class:`TextSplitter`.
        """
        super().__init__(**kwargs)
        # Non-greedy body so each Start/End pair yields its own segment.
        self.pattern = r'稿件Start(.*?)稿件End'
        # Compile once here instead of on every split_text() call;
        # DOTALL lets a segment span multiple lines.
        self._regex = re.compile(self.pattern, re.DOTALL)

    def split_text(self, text: str) -> List[str]:
        """Return the stripped contents of every 稿件Start…稿件End span in *text*."""
        return [piece.strip() for piece in self._regex.findall(text)]

# This class is wired in differently per customer; some deployments need no
# data parsing at all — the input here is assumed to be plain-text files.
class DocImport:
    """Import plain-text news files: split them into tagged segments, then
    extract per-segment metadata with an LLM.

    Each stage caches its output as a file under ``working_dir`` so a rerun
    skips work that has already been done.
    """

    def __init__(self, source_dir: str, working_dir: str = "/tmp/randmin123j"):
        """
        :param source_dir: directory containing the ``*.txt`` source files.
        :param working_dir: directory for the stage cache files.
        """
        self.source_dir = source_dir
        self.working_dir = working_dir
        # Stage caches: split chunks, accepted metas, rejected metas.
        self.file_split = os.path.join(working_dir, _FILE_SPLIT)
        self.file_meta = os.path.join(working_dir, _FILE_META)
        self.file_meta_err = os.path.join(working_dir, _FILE_META_ERR)
        self.llm = LLM()

    async def process(self) -> Tuple[int, int]:
        """Run the split + meta-extraction pipeline.

        :return: ``(accepted, rejected)`` chunk counts, or ``(0, 0)`` when the
            meta cache already exists and nothing had to be done.
        """
        # Stage 1: split the source files into chunks (skipped when cached).
        if not os.path.exists(self.file_split):
            documents = self._split_files(self.source_dir)
            _save(documents, self.file_split)

        # Stage 2: extract meta information for every news chunk.
        if not os.path.exists(self.file_meta):
            documents = _load(self.file_split)
            correct, incorrect = await self._extract_meta(documents)
            # Persist accepted chunks and keep the rejects for inspection.
            _save(correct, self.file_meta)
            _save(incorrect, self.file_meta_err)
            logger.debug(
                f"Finished the Meta extracting {self.file_split} - "
                f"Input: {len(documents)} / Output: {len(correct)}"
            )
            return len(correct), len(incorrect)
        return 0, 0

    def _load_single_file(self, filename: str, content: str, splitter: TaggedTextSplitter) -> List[dict]:
        """Split one file's content and wrap each segment as a document dict.

        :param filename: source file name, recorded in each chunk's metadata.
        :param content: full text of the file.
        :param splitter: splitter that cuts the text into tagged segments.
        :return: list of ``{"page_content": ..., "metadata": ...}`` dicts.
        """
        now = get_now()
        return [
            {
                "page_content": section,
                "metadata": {
                    "source": filename,
                    "doc_type": "txt",
                    "raw_name": filename,
                    "chunk_id": i,
                    "created": now,
                },
            }
            for i, section in enumerate(splitter.split_text(content))
        ]

    def _split_files(self, folder_path: str) -> List[dict]:
        """Split every ``*.txt`` file directly inside *folder_path* into chunks."""
        documents = []
        # One splitter instance is enough for all files; hoisted out of the loop.
        splitter = TaggedTextSplitter()
        for filename in os.listdir(folder_path):
            if not filename.endswith(".txt"):
                continue
            file_path = os.path.join(folder_path, filename)
            logger.debug(f"Split File {file_path}")
            documents.extend(
                self._load_single_file(
                    filename,
                    get_text_content(file_path),
                    splitter=splitter,
                )
            )
        return documents

    async def _extract_meta(self, documents: List[dict]) -> Tuple[List[dict], List[dict]]:
        """Run the LLM extractor over every chunk concurrently.

        :param documents: chunk dicts produced by :meth:`_split_files`.
        :return: ``(correct, incorrect)`` — chunks with a usable title vs.
            chunks that failed extraction or carry a boilerplate title.
        """
        extractor = self.llm.create("Extract Meta", prompt_extract_meta, News)

        async def ainvoke(doc: dict) -> dict:
            # Retry up to 5 times; on final failure return the doc unchanged
            # so the caller routes it into the "incorrect" bucket.
            tries = 0
            while True:
                try:
                    news = await extractor.ainvoke(input=doc["page_content"])
                    doc["metadata"]["title"] = news.title.strip()
                    doc["metadata"]["cameraman"] = news.cameraman or []
                    doc["metadata"]["reporter"] = news.reporter or []
                    doc["metadata"]["voiceover"] = news.voiceover or []
                    doc["metadata"]["reported_on"] = news.reported_on
                    doc["page_content"] = news.content.strip()
                    return doc
                except Exception as e:
                    if tries >= 5:
                        # BUG FIX: doc is a dict — the original doc.page_content
                        # raised AttributeError here, masking the real error.
                        logger.error(f"error in {e} after tried 5 times, will ignore this {doc['page_content']}")
                        return doc
                    tries += 1
                    logger.error(f"error in {e} in trying {tries}")

        correct = []
        incorrect = []
        for result in tqdm_async(
            asyncio.as_completed([ainvoke(doc) for doc in documents]),
            total=len(documents),
            desc="Extracting meta from chunks",
            unit="chunk",
        ):
            doc = await result
            # BUG FIX: use .get() — docs that exhausted their retries never had
            # a "title" key set, so plain indexing raised KeyError here.
            title = doc["metadata"].get("title")
            if (title is None or
                    title.startswith('【固定') or
                    title.startswith('提要')):
                incorrect.append(doc)
            else:
                correct.append(doc)

        return correct, incorrect
