from transformers import AutoTokenizer
from typing import List, Dict
from langchain.schema import Document
from tqdm import trange
import fitz
    
    
class PDFSplitter:
    """Split a PDF into LangChain ``Document`` objects.

    Three strategies are offered:
      - ``split_by_heading2``: one document per level-2 heading listed in
        the table of contents (TOC).
      - ``split_by_chunks``: the heading2 sections further cut into
        fixed-size, overlapping character chunks.
      - ``split_by_blocks``: page layout blocks packed into documents under
        a token budget, with tables flattened into model-readable text.
    """

    def __init__(self,
                 pdf_path: str = 'assets/初赛训练数据集.pdf',
                 tokenizer_dir: str = 'plms/bge-large-zh'):
        """
        Args:
            pdf_path: path of the PDF file to split.
            tokenizer_dir: local directory of the HuggingFace tokenizer used
                to count tokens for chunk metadata.
        """
        self.pdf_path = pdf_path
        self.tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)

    def parse_table_of_content(self, table_name: str = '目录', max_page_index: int = 20, heading1_min_size: int = 10) -> List[Dict[str, str]]:
        """Parse the table of contents and return its structure.

        Args:
            table_name: literal title printed on the TOC pages.
            max_page_index: only the first ``max_page_index`` pages are scanned.
            heading1_min_size: a line consisting of a single span whose font
                size exceeds this value is treated as a level-1 heading.

        Returns:
            TOC entries in document order, each a dict with keys
            ``'heading1'``, ``'heading2'`` and ``'start_page_idx'`` (the
            1-based page number printed in the TOC).
        """
        def _first_text(block: Dict) -> str:
            # Text of the first span of the first line of a layout block.
            return block['lines'][0]['spans'][0]['text']

        headings: List[Dict[str, str]] = []
        heading1 = ''
        with fitz.open(self.pdf_path) as pdf:
            for page in list(pdf)[:max_page_index]:
                # Skip blank pages.
                if not page.get_text():
                    continue
                page_blocks = page.get_text('dict')['blocks']
                # The page number and the TOC title are usually the first two
                # blocks; guard the [1] access (the original raised IndexError
                # on pages with a single block).
                if len(page_blocks) > 1 and _first_text(page_blocks[1]) == table_name:
                    start_idx = 2
                elif _first_text(page_blocks[0]) == table_name:
                    start_idx = 1
                else:
                    continue  # not a TOC page
                for block in page_blocks[start_idx:]:
                    for line in block['lines']:
                        spans = line['spans']
                        # A single large span is a level-1 heading.
                        if len(spans) == 1 and spans[0]['size'] > heading1_min_size:
                            heading1 = spans[0]['text']
                            continue
                        # Small (<8pt) spans form '<title>....<page>' leader
                        # lines: the text before the first dot is the level-2
                        # title, the tail after the last dot is the page number.
                        line_text = ''.join(span['text'] for span in spans if span['size'] < 8)
                        parts = line_text.split('.')
                        heading2 = parts[0].strip()
                        # Skip malformed lines whose tail is not a page number
                        # (the original crashed with ValueError on int()).
                        if heading2 and parts[-1].strip().isdigit():
                            headings.append({
                                'heading2': heading2,
                                'heading1': heading1.strip(),
                                'start_page_idx': int(parts[-1]),
                            })
        return headings

    def split_by_heading2(self) -> List[Document]:
        """Split the PDF by heading2 (the titles indexed in the TOC).

        Approach:
            1. Parse the TOC.
            2. On each title's start page, the first text match of the title
               is taken as the section start.
            3. The next TOC title's first match on its own start page marks
               the section end; whole pages in between are included.

        Returns:
            Deduplicated documents, one per heading2 section; metadata is the
            TOC entry plus ``num_tokens`` and ``end_page_idx``.

        Raises:
            ValueError: if a section title cannot be found on its declared
                start page (only the lookup of the *next* title is tolerated).
        """
        contents = self.parse_table_of_content()
        docs: List[Document] = []
        with fitz.open(self.pdf_path) as pdf:
            pages = list(pdf)
            for i in trange(len(contents) - 1):
                content = contents[i]
                next_content = contents[i + 1]
                # Copy: the original aliased the TOC entry, leaking
                # num_tokens/end_page_idx back into `contents`.
                metadata = dict(content)
                # `pages` is 0-based while TOC page numbers are 1-based.
                page_text = pages[content['start_page_idx'] - 1].get_text()
                section_start = page_text.index(content['heading2'])
                if content['start_page_idx'] == next_content['start_page_idx']:
                    # Both sections start on the same page: slice between the
                    # two title matches.
                    section_end = page_text.index(next_content['heading2'], section_start)
                    section_text = page_text[section_start: section_end]
                    metadata['end_page_idx'] = content['start_page_idx']
                else:
                    # Tail of the start page + all intermediate pages + head
                    # of the page where the next section starts.
                    section_text = page_text[section_start:]
                    # Upper bound is next start page - 1: the original ranged
                    # up to next_content['start_page_idx'] and therefore
                    # appended the next section's start page twice (once in
                    # full here, once as the head slice below).
                    for j in range(content['start_page_idx'], next_content['start_page_idx'] - 1):
                        section_text += pages[j].get_text()
                    end_page_text = pages[next_content['start_page_idx'] - 1].get_text()
                    try:
                        section_end = end_page_text.index(next_content['heading2'])
                    except ValueError:  # narrowed from a bare except
                        # Title not found on its declared page: report and
                        # stop rather than emit wrongly bounded sections.
                        print(next_content)
                        break
                    section_text += end_page_text[:section_end]
                    metadata['end_page_idx'] = next_content['start_page_idx']
                metadata['num_tokens'] = len(self.tokenizer.tokenize(section_text, add_special_tokens=False))
                docs.append(Document(page_content=section_text, metadata=metadata))
                if i == (len(contents) - 2):
                    # Last TOC entry: from its title to the end of the PDF.
                    last_metadata = dict(next_content)
                    last_page_text = pages[next_content['start_page_idx'] - 1].get_text()
                    last_start = last_page_text.index(next_content['heading2'])
                    last_text = last_page_text[last_start:]
                    # Continue from the page *after* the start page, i.e.
                    # 0-based index start_page_idx; the original started one
                    # page too late and silently dropped a page.
                    for j in range(next_content['start_page_idx'], len(pages)):
                        last_text += pages[j].get_text()
                    last_metadata['num_tokens'] = len(self.tokenizer.tokenize(last_text, add_special_tokens=False))
                    last_metadata['end_page_idx'] = len(pages)
                    docs.append(Document(page_content=last_text, metadata=last_metadata))
        return self.deduplicate(docs)

    def split_by_chunks(self, chunke_size: int = 500, chunk_overlap: int = 125) -> List[Document]:
        """Cut the heading2 sections into fixed-size overlapping chunks.

        Args:
            chunke_size: maximum chunk length in characters. (The misspelled
                name is kept for backward compatibility with keyword callers.)
            chunk_overlap: number of characters shared by adjacent chunks.

        Returns:
            One document per chunk; metadata carries ``doc_idx`` (index of the
            source section), ``num_tokens`` and ``raw_text`` (full section
            text) in addition to the section metadata.
        """
        documents = self.split_by_heading2()
        chunk_docs: List[Document] = []
        for doc_idx, doc in enumerate(documents):
            page_content = doc.page_content

            def _make_chunk(text: str) -> Document:
                # Fresh dict per chunk: the original reused one shared
                # metadata object, so every chunk of a section ended up with
                # the *last* chunk's num_tokens (and mutated doc.metadata).
                metadata = dict(doc.metadata)
                metadata['doc_idx'] = doc_idx
                metadata['num_tokens'] = len(self.tokenizer.tokenize(text, add_special_tokens=False))
                metadata['raw_text'] = page_content
                return Document(page_content=text, metadata=metadata)

            if len(page_content) > chunke_size:
                for start in range(0, len(page_content), chunke_size - chunk_overlap):
                    text = page_content[start: start + chunke_size]
                    # A trailing piece no longer than the overlap is fully
                    # contained in the previous chunk -- skip it.
                    if len(text) > chunk_overlap:
                        chunk_docs.append(_make_chunk(text))
            else:
                chunk_docs.append(_make_chunk(page_content))
        return chunk_docs

    def split_by_blocks(self, block_size: int = 500, block_overlap: int = 125) -> List[Document]:
        """Pack page layout blocks into documents under a token budget.

        Page-number blocks and TOC leader lines are discarded; tables are
        replaced by a linearised, model-readable description.

        Args:
            block_size: token budget per packed document.
            block_overlap: unused; kept for backward compatibility.

        Returns:
            Deduplicated documents whose metadata carries ``num_tokens``.
        """
        block_texts: List[str] = []
        with fitz.open(self.pdf_path) as pdf:
            for page in pdf:
                block_texts.extend(self._page_block_texts(page))
        return self.deduplicate(self._pack_blocks(block_texts, block_size))

    def _page_block_texts(self, page) -> List[str]:
        """Return the cleaned text blocks of one page, with tables flattened."""
        texts: List[str] = []
        for block in page.get_text('blocks'):
            block_text = block[4][:-1]  # drop the trailing '\n'
            if block_text.isdigit():
                continue  # bare page number
            if '----' in block_text or '....' in block_text:
                continue  # TOC leader line
            texts.append(block_text)
        for table in list(page.find_tables()):
            rows = table.extract()
            header = [str(h) for h in rows[0]]
            header_text = '\n'.join(header)
            table_text = '以下是一张表格:表格的表头为:{}'.format(header)
            table_text += '表格的信息如下:\n'
            for table_row in rows[1:]:
                row_text = ''
                for col_name, cell in zip(header, table_row):
                    row_text += '{}:{}'.format(col_name, cell)
                table_text += row_text
            table_text += '\n'
            # Replace the blocks covered by the table with its flattened text.
            # Guard the lookup: the original raised NameError (or reused a
            # stale index from a previous table) when the header was not
            # found among the page's blocks.
            table_start_idx = None
            for idx, text in enumerate(texts):
                if header_text in text:
                    table_start_idx = idx
                    break
            if table_start_idx is not None:
                texts[table_start_idx: table_start_idx + table.row_count] = [table_text]
        return texts

    def _pack_blocks(self, block_texts: List[str], block_size: int) -> List[Document]:
        """Greedily pack block texts into documents of ~``block_size`` tokens."""
        def _num_tokens(text: str) -> int:
            return len(self.tokenizer.tokenize(text, add_special_tokens=False))

        docs: List[Document] = []
        document_text = ''
        for block_text in block_texts:
            num_previous_tokens = _num_tokens(document_text)
            num_block_tokens = _num_tokens(block_text)
            if num_block_tokens > block_size:
                # Oversized block: flush the buffer (only if non-empty -- the
                # original could emit an empty document here) and emit the
                # block as a document of its own.
                if document_text:
                    docs.append(Document(page_content=document_text, metadata={'num_tokens': num_previous_tokens}))
                docs.append(Document(page_content=block_text, metadata={'num_tokens': num_block_tokens}))
                document_text = ''
                continue
            if (num_previous_tokens + num_block_tokens) >= block_size:
                docs.append(Document(page_content=document_text, metadata={'num_tokens': num_previous_tokens}))
                # Start the next document with the current block; the
                # original dropped it here, silently losing its text.
                document_text = block_text
                continue
            document_text += block_text
        if document_text:
            # Flush the trailing buffer, which the original silently dropped.
            docs.append(Document(page_content=document_text, metadata={'num_tokens': _num_tokens(document_text)}))
        return docs

    def deduplicate(self, docs: List[Document]) -> List[Document]:
        """Drop documents with duplicate text, keeping the first occurrence."""
        dedup_docs: List[Document] = []
        seen_texts = set()  # set membership: O(1) vs the original O(n) list scan
        for doc in docs:
            if doc.page_content not in seen_texts:
                seen_texts.add(doc.page_content)
                dedup_docs.append(doc)
        return dedup_docs

    def get_pdf_text(self, start_page: int = 12) -> str:
        """Return the PDF text after the TOC.

        Args:
            start_page: pages with 0-based index <= ``start_page`` are
                skipped (assumed to be front matter / TOC).

        Note:
            Not every PDF page carries a printed page number, hence the
            positional cutoff instead of parsing page numbers.
        """
        page_texts: List[str] = []
        with fitz.open(self.pdf_path) as pdf:
            for page_idx, page in enumerate(pdf):
                if page_idx > start_page:
                    page_text = page.get_text()
                    if page_text:
                        page_texts.append(page_text)
        return ''.join(page_texts)