import json
import os
import re
import uuid
from collections import defaultdict
from pathlib import Path

import pandas as pd
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from config import embedder_config, milvus_config

from config import CHAPTER, max_len, overlap, sep_patterns


def level_sep(lv, text):
    """Recursively split markdown ``text`` into numbered sections at depth ``lv``.

    A depth-``lv`` heading is a line such as ``## 1.2 Title``: 1-6 ``#``
    marks followed by ``lv`` dot-separated 1-2 digit numbers (a negative
    lookahead rejects deeper numbers like ``1.2.3``).

    :param lv: nesting depth, i.e. how many dot-separated number parts
    :param text: markdown text to scan
    :return: list of dicts with keys ``number``, ``title``, ``content``
        and, when deeper headings exist inside a body, ``subsections``
        (parsed recursively at depth ``lv + 1``)
    """
    number_re = r'\.'.join([r'\d{1,2}'] * lv)
    heading_re = rf'(?m)^#{{1,6}}\s*({number_re})(?!\.\d+)\s*(.*)$'
    headings = list(re.finditer(heading_re, text))

    parsed = []
    for idx, head in enumerate(headings):
        body_start = head.end()
        body_end = headings[idx + 1].start() if idx + 1 < len(headings) else len(text)
        body = text[body_start:body_end].strip()

        entry = {
            "number": head.group(1),
            "title": head.group(2).strip(),
            "content": body,
        }

        # Parse the next nesting level (lv + 1) inside this body.
        children = level_sep(lv + 1, body)
        if children:
            entry["subsections"] = children

        parsed.append(entry)

    return parsed


def find_consecutive_subsequences(arr):
    """Collect runs of consecutive integers (length >= 2) found in ``arr``.

    Contract (kept from the original implementation):

    * empty input -> ``[]``
    * when at least one run of length >= 2 exists AND the first run
      contains ``arr[0]`` -> the list of runs
    * otherwise -> ``[[arr[0]]]`` (fallback to just the first element)

    :param arr: list of integers, scanned in order
    :return: list of lists of integers
    """
    if not arr:
        return []

    runs = []
    run_start = 0
    for idx in range(1, len(arr)):
        # A break in consecutiveness ends the current candidate run.
        if arr[idx] != arr[idx - 1] + 1:
            if idx - run_start >= 2:
                runs.append(arr[run_start:idx])
            run_start = idx

    # Flush the trailing run, if long enough.
    if len(arr) - run_start >= 2:
        runs.append(arr[run_start:])

    if runs and runs[0] and arr[0] in runs[0]:
        return runs
    return [[arr[0]]]


def process_docs(documents, pdf_tuples, clean_re):
    """Attach PDF position info (page -> bboxes) to each Document, in place.

    For each chunk: clean its text with ``clean_re``, find which entries of
    ``pdf_tuples`` — tuples of ``(page_idx, cleaned_span_text, bbox)`` — have
    their text contained in the cleaned chunk, keep the first consecutive run
    of matching indices, and store the matched bboxes grouped by page under
    ``doc.metadata['pbox']``.  Consumed tuples are sliced off so the next
    chunk only scans spans after this chunk's last match.

    NOTE(review): ``find_consecutive_subsequences(span_pos)[0]`` raises
    IndexError when ``span_pos`` is empty — this assumes every chunk matches
    at least one span; confirm against real data.

    :param documents: list of langchain Documents (mutated in place)
    :param pdf_tuples: list of (page_idx, cleaned_span_text, bbox)
    :param clean_re: compiled regex of characters to strip before matching
    """
    cur_page = 6  # page numbering starts at 6 (leading pages dropped upstream)
    for j, doc in enumerate(documents):
        page_content = doc.page_content.strip()
        if not page_content:
            # Empty chunk: fall back to the last (deepest) title text.
            # NOTE(review): popitem() *removes* that entry from
            # metadata['title'] — confirm this mutation is intended, since
            # the title dict is serialized later.
            page_content = doc.metadata['title'].popitem()[-1]
        cleaned_text = re.sub(clean_re, '', page_content)
        page_to_boxes = defaultdict(list)

        span_pos = []
        for i, (page_idx, span_text, bbox) in enumerate(pdf_tuples):
            if span_text in cleaned_text:
                span_pos.append(i)
            # Stop scanning more than 4 pages past the last matched page;
            # a single chunk is assumed not to span that far.
            if page_idx - cur_page > 4:
                break
        # Keep only the first consecutive run of matched span indices.
        span_pos = find_consecutive_subsequences(span_pos)[0]
        print(j, cleaned_text, span_pos)
        for i in span_pos:
            page_idx, span_text, bbox = pdf_tuples[i]
            page_to_boxes[page_idx].append(bbox)
            cur_page = page_idx
        # Drop consumed spans so the next chunk starts after this one.
        pdf_tuples = pdf_tuples[span_pos[-1] + 1:]
        doc.metadata['pbox'] = page_to_boxes


def get_span_ls(page_info, clean_re):
    """Flatten parsed page data into ``(page_no, cleaned_text, bbox)`` tuples.

    Page numbers start at 6 because the caller drops the first 6 pages.
    Span text is cleaned with ``clean_re``; spans whose cleaned text is
    empty are skipped.

    :param page_info: per-page dicts, each with a ``para_blocks`` list
    :param clean_re: compiled regex of characters to strip from span text
    :return: flat list of (page_no, cleaned_text, bbox) tuples
    """
    spans = []
    for page_no, page in enumerate(page_info):
        for block in page['para_blocks']:
            # Pick the list of lines depending on the block type.
            block_type = block['type']
            if block_type == 'image' and len(block['blocks']) > 1:
                # Image blocks carry their text in the second sub-block.
                line_ls = block['blocks'][1]['lines']
            elif block_type == 'table':
                # Tables: merge lines from the first two sub-blocks.
                line_ls = []
                for sub_block in block['blocks'][:2]:
                    line_ls.extend(sub_block.get('lines', []))
            else:
                line_ls = block.get('lines', [])

            for line in line_ls:
                for span in line['spans']:
                    raw = span.get('content') if 'content' in span else span.get('html', '')
                    cleaned = re.sub(clean_re, '', raw)
                    if cleaned:
                        spans.append((page_no + 6, cleaned, span['bbox']))
    return spans


def clean_md(content):
    """Split markdown ``content`` into Document chunks with hierarchical titles.

    Levels 2-3 come from numbered ``#`` headings parsed by ``level_sep``
    (e.g. "3.2", "3.2.1"); level 1 is the chapter name looked up in
    ``CHAPTER``.  Two extra pseudo-heading levels are detected inside a
    section body with regexes:

    * level 4: lines like ``1. xxx`` (1-2 digits, a dot, then text)
    * level 5: lines like ``(1) xxx`` / ``（2）.yyy`` (ASCII or full-width parens)

    Bodies longer than ``max_len`` are split further with
    ``RecursiveCharacterTextSplitter``.  Each chunk becomes a ``Document``
    whose ``metadata['title']`` maps heading level (int 1-5) to heading text.

    :param content: full markdown text
    :return: list of langchain ``Document`` objects
    """
    # Pseudo-heading patterns (the source markdown has no real lvl-4/5 "#" headings).
    lv4_pattern = re.compile(r'(?m)^(\d{1,2}\s*\.\s*.*$)')  # e.g. "1. feature", "2. scenario" -> level-4 heading
    lv5_pattern = re.compile(r'(?m)^([(（]\s*\d{1,2}\s*[)）]\s*\.*\s*.*$)')  # e.g. "(1). xxx", "（2）yyy" -> level-5 heading
    # Collapse runs of blank lines into a single newline.
    content = re.sub('(\s*\n\s*){2,}', '\n', content)
    docs = []
    splitter = RecursiveCharacterTextSplitter(
        separators=sep_patterns,
        is_separator_regex=True,
        chunk_size=max_len,
        chunk_overlap=overlap,
        strip_whitespace=True
    )
    # Parse "x.y"-numbered sections (depth 2); depth 3 comes from their subsections.
    sections = level_sep(2, content)
    # Strips stray single-"#" heading markers left inside section bodies.
    sub_heading_re = re.compile(r'(^|\n)+#\s*')
    for sec in sections:
        chap_no = sec["number"].split('.')[0]
        # Title hierarchy so far: 1 = chapter name, 2 = "number title".
        base_attr = {
            1: f"{CHAPTER[chap_no]}",
            2: f"{sec['number']} {sec['title']}"
        }

        # Build (attr, content) pairs uniformly, whether or not the
        # section has level-3 subsections.
        entries = []
        if 'subsections' in sec:
            for sub in sec['subsections']:
                attr = {**base_attr, 3: f"{sub['number']} {sub['title']}"}
                text = re.sub(sub_heading_re, '\n', sub['content'])
                entries.append((attr, text))
        else:
            text = re.sub(sub_heading_re, '\n', sec['content'])
            entries.append((base_attr, text))

        # Split each body into chunks and emit Documents.
        for attr, lv3_body in entries:
            lv4_matches = list(re.finditer(lv4_pattern, lv3_body))
            sep_blocks = []   # chunk texts
            sep_attr = []     # parallel list of title dicts, one per chunk
            if lv4_matches:
                for lv4_i, lv4_match in enumerate(lv4_matches):
                    lv4_pfx = lv4_match.start()
                    lv4_title = lv4_match.group(1)
                    # Preamble before the first level-4 heading keeps only
                    # the level-1..3 titles.
                    if lv4_i == 0 and lv4_pfx != 0:
                        lv4_pfx_txt = lv3_body[0:lv4_pfx]
                        ebs = splitter.split_text(lv4_pfx_txt)
                        ac = attr.copy()
                        sep_blocks.extend(ebs)
                        sep_attr.extend([ac] * len(ebs))

                    lv4_s = lv4_match.end()
                    lv4_e = lv4_matches[lv4_i + 1].start() if lv4_i + 1 < len(lv4_matches) else len(lv3_body)
                    lv4_body = lv3_body[lv4_s:lv4_e].strip()
                    lv4_txt = lv4_title + '\n' + lv4_body
                    # Short enough (title + body): keep the whole level-4 body as one chunk.
                    if len(lv4_txt) <= max_len:
                        sep_blocks.append(lv4_body)
                        ac = attr.copy()
                        ac[4] = lv4_title
                        sep_attr.append(ac)

                    else:
                        # Too long: try splitting at level-5 pseudo-headings first.
                        lv5_matches = list(re.finditer(lv5_pattern, lv4_body))
                        if lv5_matches:
                            for lv5_i, lv5_match in enumerate(lv5_matches):
                                lv5_pfx = lv5_match.start()
                                lv5_title = lv5_match.group(1)
                                # Preamble before the first level-5 heading.
                                if lv5_i == 0 and lv5_pfx != 0:
                                    lv5_pfx_txt = lv4_body[0:lv5_pfx]
                                    ebs = splitter.split_text(lv5_pfx_txt)
                                    sep_blocks.extend(ebs)
                                    ac = attr.copy()
                                    ac[4] = lv4_title
                                    sep_attr.extend([ac] * len(ebs))

                                lv5_s = lv5_match.end()
                                lv5_e = lv5_matches[lv5_i + 1].start() if lv5_i + 1 < len(lv5_matches) else len(
                                    lv4_body)
                                lv5_body = lv4_body[lv5_s:lv5_e].strip()
                                lv5_txt = lv4_title + '\n' + lv5_title + '\n' + lv5_body
                                lv5_txt = lv5_txt.replace('\n\n', '\n')
                                # Short enough with both titles: one chunk.
                                if len(lv5_txt) <= max_len:
                                    sep_blocks.append(lv5_body)
                                    ac = attr.copy()
                                    ac[4] = lv4_title
                                    ac[5] = lv5_title
                                    sep_attr.append(ac)
                                else:
                                    # Still too long: character-split the level-5 body.
                                    ac = attr.copy()
                                    ac[4] = lv4_title
                                    ac[5] = lv5_title
                                    ebs = splitter.split_text(lv5_body)
                                    sep_blocks.extend(ebs)
                                    sep_attr.extend([ac] * len(ebs))
                        else:
                            # No level-5 headings: character-split the level-4 body.
                            ebs = splitter.split_text(lv4_body)
                            sep_blocks.extend(ebs)
                            ac = attr.copy()
                            ac[4] = lv4_title
                            sep_attr.extend([ac] * len(ebs))
            else:
                # No level-4 headings at all: character-split the whole body.
                ebs = splitter.split_text(lv3_body)
                sep_blocks.extend(ebs)
                ac = attr.copy()
                sep_attr.extend([ac] * len(ebs))
            for metadata, chunk in zip(sep_attr, sep_blocks):
                docs.append(
                    Document(page_content=chunk, metadata={'title': metadata}))

    return docs


def get_documents(middle_json_path, out_md_path):
    """Build Document chunks from a markdown export and attach PDF positions.

    Reads the markdown file, splits it into titled chunks with
    ``clean_md``, loads the PDF span/bbox info from the middle JSON
    (dropping the first 6 pages), and matches each chunk to its page
    bboxes via ``process_docs``.

    Side effect: changes the working directory to this file's directory.

    :param middle_json_path: path to the ``*_middle.json`` span dump
    :param out_md_path: path to the extracted markdown file
    :return: list of Documents with ``metadata['pbox']`` filled in
    """
    project_root = Path(__file__).resolve().parent
    os.chdir(project_root)
    # Pre-compiled regex: strip everything except word chars and CJK ideographs.
    clean_re = re.compile(r'[^\w\u4e00-\u9fa5]')

    markdown_text = Path(out_md_path).read_text(encoding='utf-8')
    documents = clean_md(markdown_text)

    # Load and trim the PDF span info.
    spans_payload = json.loads(Path(middle_json_path).read_text(encoding='utf-8'))
    trimmed_pages = spans_payload['pdf_info'][6:]  # drop the first 6 pages
    span_tuples = get_span_ls(trimmed_pages, clean_re)

    # Match each chunk to page numbers and bboxes (in place).
    process_docs(documents, span_tuples, clean_re)
    return documents


def get_batche_index(ls_len, batch_size):
    """Return half-open ``(start, end)`` index pairs covering ``range(ls_len)``.

    Example::

        ls_len=99500, batch_size=1000 ->
        [(0, 1000), (1000, 2000), ..., (99000, 99500)]

    Fix: the original built an ``end`` list and indexed ``end[-1]``, which
    raised IndexError for ``ls_len == 0``; this version returns ``[]`` in
    that case and is otherwise identical.

    :param ls_len: total number of items to cover (>= 0)
    :param batch_size: maximum size of each batch (> 0)
    :return: list of (start, end) tuples, ``end`` exclusive
    """
    return [(s, min(s + batch_size, ls_len)) for s in range(0, ls_len, batch_size)]


if __name__ == "__main__":
    middle_json_path = '/data/ly2/rag_ly/output/《高效使用DeepSeek》_middle.json'
    out_md_path = '/data/ly2/rag_ly/manu_output/《高效使用DeepSeek》.md'
    split_docs = get_documents(middle_json_path, out_md_path)
    data = {'vecId': [], "blockDenseEmbeddings": [], 'block': [], 'posInfo': []}
    for itm in split_docs:
        title = itm.metadata['title']  # dict
        block = "\n".join(title.values()) + '\n' + itm.page_content
        vid = str(uuid.uuid5(uuid.NAMESPACE_URL, block))
        if vid not in data['vecId']:
            data['vecId'].append(vid)
            data['block'].append(block)
            data['posInfo'].append(json.dumps(itm.metadata))
    from milvus_op import MilvusOP
    from sentence_transformers import SentenceTransformer as TextEmbeder

    embedder = TextEmbeder(**embedder_config)
    for s, e in get_batche_index(len(data['block']), 100):
        embeddings = embedder.encode(data['vecId'][s:e], show_progress_bar=True)
        data['blockDenseEmbeddings'].extend(embeddings)
    milvus_op = MilvusOP(**milvus_config)
    milvus_op.collection.drop()
    milvus_op.create_collection()
    milvus_op.collection.insert(pd.DataFrame(data))
