# coding=utf-8
import asyncio
import json
import os
import pickle
import re
from collections import OrderedDict
from pprint import pprint

import fitz  # PyMuPDF
from docx import Document

from pdf2paragraph.get_page import extract_table_images
from utils.functions import split_directory, check_directory

# async def get_full_text(file_name: str, project_id: int, stage_round: int):
async def get_full_text(file_name: str):
    """Extract ``./data/{file_name}.pdf`` into an ordered dict of paragraphs.

    Pages are read one at a time through :func:`extract_table_images`, which
    yields positioned text fragments plus ``<Tables>``/``<Images>``/``<Rects>``
    placeholders.  Headers, footers and the textual table of contents are
    detected heuristically, then fragments are merged into paragraphs based
    on their x-coordinates (left margin ~91pt, right margin ~490pt).

    :param file_name: PDF base name (without ``.pdf``) under ``./data/``.
    :return: ``OrderedDict`` mapping a 0-based paragraph index to its text;
        table/image/rect placeholders appear as their own entries.
    """
    pdf_file = f"./data/{file_name}.pdf"
    output_path = "./data/"

    doc = fitz.open(pdf_file)
    pages_num = len(doc)
    print('总页数', pages_num)

    # Built-in PDF bookmarks: keep level-1 entries as a fallback TOC.
    toc_title_page_list = []
    for level, title, page in doc.get_toc():
        print(f"等级: {level}, 标题: {title}, 页码: {page}")
        if level == 1:
            toc_title_page_list.append({'标题': title, '页码': page})

    # Collect positioned fragments (and table/image placeholders) per page.
    text_page_list = []
    for page_num in range(pages_num):
        print(f'=========================================page_num={page_num}==============================================')
        text_page_list += await extract_table_images(pdf_file, page_num, output_path, file_name)

    # Strip headers and footers.
    new_text_page_list = []
    for text_page in text_page_list:
        stripped = text_page['text'].strip()
        is_placeholder = ('<Tables>' in stripped or '<Images>' in stripped
                          or '<Rects>' in stripped)
        if stripped[:4] in ('8-1-', '8-2-', '8-3-', '8-4-'):
            # Footer: "8-x-..." document page labels.
            continue
        elif text_page['top'] < 45 and not is_placeholder:
            # Header: anything in the top 45pt band (placeholders excepted).
            continue
        elif stripped.isdigit() and text_page['x0'] > 270:
            # Shenzhen-style centered page-number footer.
            continue
        else:
            new_text_page_list.append(text_page)
    text_page_list = new_text_page_list

    print('======text_page_list======')
    pprint(text_page_list)

    # Locate the start of the textual table of contents ("目录").
    # Fix: directory_list was previously bound only when "目录" was found,
    # raising NameError later for documents without a textual TOC.
    directory_index = None
    directory_list = []
    for index, text in enumerate(text_page_list):
        if '目录' in re.sub(" ", "", text['text'].strip()):
            directory_index = index  # TOC start index
            break

    # Walk TOC lines.  Each entry ends with '...'; 5 consecutive lines
    # without '...' means we have left the TOC.
    # Fix: directory_end_index was unbound when there was no TOC or the
    # TOC ran to the end of the fragment list; -1 never matches an index.
    directory_end_index = -1
    if directory_index is not None:
        directory_text = ''
        directory_num = 0
        for index, text in enumerate(text_page_list[directory_index + 1:]):
            if directory_num < 5:
                if '...' in text['text'].strip():
                    directory_text += text['text'].strip()
                    directory_list.append(directory_text)
                    directory_text = ''
                    directory_num = 0
                else:
                    directory_text += text['text'].strip()
                    directory_num += 1
            else:
                # TOC end index (rewind past the 5 non-TOC lines).
                directory_end_index = directory_index + index - 5
                break

    # Split TOC entries into {标题, 页码} pairs; fall back to PDF bookmarks.
    title_page_list = split_directory(directory_list)
    if len(title_page_list) == 0:
        title_page_list = toc_title_page_list

    title_page_list = check_directory(title_page_list)
    title_page_list.insert(0, {'标题': '开头', '页码': 0})
    title_page_list.append({'标题': '结尾', '页码': len(doc)})

    # Merge fragments into paragraphs.  Heuristics: 91pt left margin,
    # 490pt right margin, ~8pt per indentation character.
    full_text = ''
    full_text_list = []
    for index, text in enumerate(text_page_list):
        print('index', index)
        print('text', text)

        stripped = text['text'].strip()
        compact = re.sub(" ", "", text['text'])
        # Leading spaces reproduce the fragment's indentation in the output.
        indent = ' ' * int((text['x0'] - 91) / 8)

        if '<Tables>' in stripped or '<Images>' in stripped or '<Rects>' in stripped:
            # Placeholder gets its own paragraph; flush the accumulator.
            full_text_list.append(full_text)
            full_text_list.append(stripped)
            full_text = ''
        elif directory_end_index == index:
            # Force a paragraph break at the end of the TOC.
            full_text += stripped
            full_text_list.append(full_text)
            full_text = ''
        elif (len(stripped) >= 1 and stripped[0] in ['一', '二', '三', '四', '五', '六', '七', '八', '九']
              and len(compact) >= 2 and compact[1] == '、'):
            # "一、" style heading (fix: guard short fragments against IndexError).
            full_text_list.append(full_text)
            full_text_list.append(stripped)
            full_text = ''
        elif '回复' in compact[:5]:
            # "回复" (reply) lines become their own paragraph.
            full_text_list.append(full_text)
            full_text_list.append(stripped)
            full_text = ''
        elif text['x0'] > 91 and text['x1'] < 490:
            # Indented AND short: a standalone title line.
            full_text_list.append(full_text)
            full_text_list.append(indent + stripped)
            full_text = ''
        elif text['x1'] < 490:
            # Trailing whitespace on the line: paragraph ends here.
            full_text += stripped
            full_text_list.append(full_text)
            full_text = ''
        elif text['x0'] > 91:
            # Leading whitespace: previous paragraph ends, new one starts.
            full_text_list.append(full_text)
            full_text = indent + stripped
        else:
            # Full-width line: keep accumulating the current paragraph.
            full_text += stripped

    print('===================================全文==================================')
    # Drop empty accumulator flushes.
    pdf_text_list = [text for text in full_text_list if text.strip()]

    pdf_text_dict = OrderedDict()
    # Placeholder index kept for a (currently disabled) table-to-HTML step.
    table_html_dict = OrderedDict()
    for index, text in enumerate(pdf_text_list):
        if '<Tables>' in text.strip() or '<Images>' in text.strip() or '<Rects>' in text.strip():
            table_html_dict[text] = ''
        pdf_text_dict[index] = text

    return pdf_text_dict

if __name__ == '__main__':
    # Sample inputs used during development:
    # file_name = '1662_1'
    # file_name = '上海_1836_2'
    # file_name = '上海_1363_2'
    # file_name = '上海_1409_0'
    # file_name = 'sh_1836_2024-10-24_2'
    # file_name = '问题int'
    # file_name = '问题int_str'
    # file_name = '上海_1387 (2)'
    # file_name = '上海_1778 (1)'
    # file_name = '8-1发行人及中介机构关于第二轮审核问询函的回复意见_2024-08-07'
    # file_name = '8-1 发行人及保荐机构回复意见_2022-10-13'
    file_name = '8-1-2 发行人及保荐机构关于第二轮审核问询函的回复（修订版）_2023-09-05'
    # file_name = 'dir3_8-1-2 发行人及保荐机构回复意见（二）（2020年报财务数据更新版）_2021-06-23'

    # Fix: get_full_text is a coroutine function — calling it directly only
    # created a coroutine object and never executed the body.
    pdf_text_dict = asyncio.run(get_full_text(file_name))
    print()