"""
合同文件解析
"""
from core.contract import Contract
from core.document import Document
from core.clean_data import clean_data
from core.text_splitter import TextSplitter
from typing import Optional, List, Dict
from core.toc_parse import TocParser
from core.config import settings


class ContractParser(object):
    """Parse contract data into context-enriched Document chunks.

    Pipeline: clean the raw contract payload, extract a table of contents
    (TOC), split every paragraph into chunks, and attach to each chunk a set
    of related context blocks (governing titles plus neighbouring paragraph
    text) bounded by ``settings.DOCUMENT_SIZE``.
    """

    def __init__(self, chunk_size: int = 300, chunk_overlap: int = 50):
        """
        :param chunk_size: maximum characters per split chunk.
        :param chunk_overlap: characters of overlap between adjacent chunks.
        """
        # NOTE(review): attribute keeps the historical spelling "spliter"
        # so any external code reading it keeps working.
        self.text_spliter = TextSplitter(chunk_size, chunk_overlap)
        self.toc_parser = TocParser()

    def parse(self, contract_data: Dict) -> List[Document]:
        """Parse contract text into Documents.

        The contract's ``textBlocks`` are assumed to be already sorted.

        :param contract_data: raw contract payload with ``fileId``,
            ``documentName`` and ``textBlocks`` keys (shape assumed from
            usage here — confirm against the caller).
        :return: list of chunk Documents with ``related_blocks`` metadata.
        """
        # Clean the raw contract payload before wrapping it.
        cleaned_data = clean_data(contract_data)

        contract = Contract(
            id=cleaned_data['fileId'],
            name=cleaned_data['documentName'],
            paragraphs=cleaned_data['textBlocks'],
            metadata=None
        )

        toc = self.toc_parser.parse(contract)
        return self.split_paragraphs(toc, contract.paragraphs)

    def split_paragraphs(self, toc: Optional[Dict], paragraphs: List[Dict]) -> List[Document]:
        """Split paragraph contents and return the wrapped text fragments.

        - ``toc``: paragraph index -> title text; a ``'## '`` prefix marks a
          top-level title.
        - ``paragraphs``: paragraph dicts carrying ``blockIndex``, ``id`` and
          ``content``.
        - Non-title chunks are additionally expanded with context: the
          governing top/sub titles and up to 4 neighbouring paragraphs in
          each direction, as long as the accumulated character count stays
          within ``settings.DOCUMENT_SIZE``.
        """
        toc = toc or {}
        top_title = {'index': None, 'content': ''}
        sub_title = {'index': None, 'content': ''}

        # blockIndex -> paragraph lookup.
        index2paragraph = {p['blockIndex']: p for p in paragraphs}

        all_documents = []
        for paragraph in paragraphs:

            block_index = paragraph['blockIndex']
            metadata = {
                'id': paragraph['id'],
                'block_index': block_index
            }
            # Titles are split from the (possibly '## '-prefixed) TOC text;
            # ordinary paragraphs from their raw content.
            if block_index in toc:
                source_text = toc[block_index]
            else:
                source_text = paragraph['content']
            docs = self.text_spliter.create_documents(source_text, metadata)

            # Track the currently governing top-level / sub titles.
            if block_index in toc:
                toc_title = toc[block_index]
                if str(toc_title).startswith('## '):
                    top_title = {'index': block_index, 'content': toc_title}
                    sub_title = {'index': None, 'content': ''}
                else:
                    sub_title = {'index': block_index, 'content': toc_title}

            for doc in docs:
                related_blocks = {}
                # Seed with the current chunk itself.
                metadata = doc.metadata
                key = f"{metadata['block_index']}-{metadata['start_index']}"
                related_blocks[key] = {
                    'id': metadata['id'],
                    'content': doc.content
                }
                words_count = len(doc.content)

                # Try to attach the top title, then the sub title.
                for title in (top_title, sub_title):
                    if title['index'] is None:
                        continue
                    title_count = len(title['content'])
                    # Titles may fill the budget exactly (<=), unlike
                    # context blocks below (strict <).
                    if words_count + title_count > settings.DOCUMENT_SIZE:
                        continue
                    key = f"{title['index']}-0"
                    if key not in related_blocks:
                        related_blocks[key] = {
                            'id': index2paragraph[title['index']]['id'],
                            'content': title['content']
                        }
                        words_count += title_count

                # Expand with surrounding paragraphs, only within the current
                # top-title section and at most 4 blocks in each direction.
                previous_continue_flag = True
                next_continue_flag = True
                for k in range(1, 5):
                    previous_index = block_index - k
                    # Never expand backwards past the governing top title.
                    valid_check = (top_title['index'] is None or previous_index > top_title['index'])
                    valid_check &= previous_continue_flag
                    valid_check &= (previous_index in index2paragraph)
                    if valid_check:
                        if previous_index in toc:
                            previous_content = toc[previous_index]
                            content_count = len(previous_content)
                            if words_count + content_count < settings.DOCUMENT_SIZE:
                                key = f"{previous_index}-0"
                                if key not in related_blocks:
                                    related_blocks[key] = {
                                        'id': index2paragraph[previous_index]['id'],
                                        'content': previous_content
                                    }
                                    words_count += content_count
                        else:
                            previous_paragraph = index2paragraph[previous_index]
                            previous_docs = self.text_spliter.create_documents(previous_paragraph['content'], metadata={})
                            if previous_docs:
                                # Take the tail chunk: it is the text adjacent
                                # to the current block.
                                previous_doc = previous_docs[-1]
                                previous_content = previous_doc.content
                                content_count = len(previous_content)
                                if words_count + content_count < settings.DOCUMENT_SIZE:
                                    key = f"{previous_index}-{previous_doc.metadata['start_index']}"
                                    if key not in related_blocks:
                                        related_blocks[key] = {
                                            'id': index2paragraph[previous_index]['id'],
                                            'content': previous_content
                                        }
                                        words_count += content_count

                                # A multi-chunk paragraph means backward
                                # context is already long: stop expanding.
                                if previous_doc.metadata['start_index'] != 0:
                                    previous_continue_flag = False

                    next_index = block_index + k
                    # Stop forward expansion at the next top-level title.
                    valid_check = (next_index not in toc or not str(toc[next_index]).startswith('## '))
                    valid_check &= next_continue_flag
                    valid_check &= (next_index in index2paragraph)
                    if valid_check:
                        if next_index in toc:
                            next_content = toc[next_index]
                            content_count = len(next_content)
                            if words_count + content_count < settings.DOCUMENT_SIZE:
                                key = f"{next_index}-0"
                                if key not in related_blocks:
                                    related_blocks[key] = {
                                        'id': index2paragraph[next_index]['id'],
                                        'content': next_content
                                    }
                                    words_count += content_count
                        else:
                            next_paragraph = index2paragraph[next_index]
                            next_docs = self.text_spliter.create_documents(next_paragraph['content'], metadata={})
                            if next_docs:
                                # Take the head chunk: the text adjacent to
                                # the current block.
                                next_doc = next_docs[0]
                                next_content = next_doc.content
                                content_count = len(next_content)
                                if words_count + content_count < settings.DOCUMENT_SIZE:
                                    key = f"{next_index}-{next_doc.metadata['start_index']}"
                                    if key not in related_blocks:
                                        related_blocks[key] = {
                                            'id': index2paragraph[next_index]['id'],
                                            'content': next_content
                                        }
                                        # BUG FIX: the original never counted
                                        # forward context against the budget,
                                        # letting documents grow past
                                        # DOCUMENT_SIZE.
                                        words_count += content_count

                                # A multi-chunk paragraph means forward
                                # context is already long: stop expanding.
                                if next_doc.metadata['start_index'] != 0:
                                    next_continue_flag = False

                metadata['related_blocks'] = related_blocks
                doc.metadata = metadata
                all_documents.append(doc)

        return all_documents