import os
import time
import paddle
import re
from PIL import Image
import numpy as np

paddle.disable_signal_handler()

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # 禁止albumentations检查更新

from magic_pdf.operators.pipes import PipeResult
from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
from magic_pdf.model.sub_modules.table.rapidtable.rapid_table import RapidTableModel
from magic_pdf.data.data_reader_writer import FileBasedDataWriter
from bs4 import BeautifulSoup

from loguru import logger

from PIL import Image
import os

def filter_text(text):
    """Strip *text* down to CJK (U+4E00–U+9FA5), ASCII letters and digits.

    Every other character (whitespace, punctuation, symbols) is removed.
    """
    return re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9]', '', text)

def process_html_table(html_content):
    """Remove rows of an HTML table that duplicate its first (header) row.

    Whenever a later row has the same filtered cell texts as the first row,
    that duplicate row is removed together with the following
    ``sum(header colspans) - 1`` rows (presumably per-column filler rows
    produced by the table recognizer for a merged image — TODO confirm
    against the model output).

    Args:
        html_content: HTML markup (string or soup-compatible input)
            containing a table.

    Returns:
        The modified BeautifulSoup document object.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    tr_list = soup.find_all('tr')
    # Guard: a table without rows has no header to deduplicate
    # (the original raised IndexError here).
    if not tr_list:
        return soup

    # Header signature: filtered cell texts plus the colspan of each cell.
    first_tr = tr_list[0]
    first_tr_content = [filter_text(td.get_text().strip()) for td in first_tr.find_all('td')]
    first_tr_colspans = [int(td.get('colspan', 1)) for td in first_tr.find_all('td')]

    i = 1
    while i < len(tr_list):
        tr = tr_list[i]
        tr_content = [filter_text(td.get_text().strip()) for td in tr.find_all('td')]

        if tr_content == first_tr_content:
            # Drop the duplicate header row itself ...
            tr.decompose()
            # ... and advance past it. (The original kept `i` pointing at the
            # already-decomposed row, which re-decomposed it inside the inner
            # loop and looped forever when sum(colspans) == 1.)
            i += 1
            # ... then drop the sum(colspans) - 1 rows that follow it.
            for _ in range(sum(first_tr_colspans) - 1):
                if i < len(tr_list):
                    tr_list[i].decompose()
                    i += 1
        else:
            i += 1

    return soup


def merge_images_vertically(image_path1, image_path2, local_image_dir='', quality=100, max_height=800):
    """Stack two images vertically and save the result as a JPG.

    Args:
        image_path1: Path of the top image, relative to *local_image_dir*.
        image_path2: Path of the bottom image, relative to *local_image_dir*.
        local_image_dir: Directory holding the inputs and receiving the output.
        quality: JPEG quality used when saving the merged image.
        max_height: If the two source heights sum to more than this, merging
            is skipped.

    Returns:
        ``(output_path, merged_file_name)`` on success, or ``(None, None)``
        when the combined height exceeds *max_height*.
    """
    # Ensure the output directory exists. Guard against '' (makedirs('')
    # raises FileNotFoundError) and use exist_ok to avoid a create race.
    if local_image_dir and not os.path.exists(local_image_dir):
        os.makedirs(local_image_dir, exist_ok=True)

    full_image_path1 = os.path.join(local_image_dir, image_path1)
    full_image_path2 = os.path.join(local_image_dir, image_path2)

    # Context managers release the underlying file handles
    # (the original left both files open).
    with Image.open(full_image_path1) as img1, Image.open(full_image_path2) as img2:
        # Refuse to merge when the stacked height would exceed the limit.
        if img1.height + img2.height > max_height:
            return None, None

        # Target size: mean of the two widths, sum of the ORIGINAL heights.
        target_width = (img1.width + img2.width) // 2
        target_height = img1.height + img2.height

        # Scale both images to the common width, preserving aspect ratio.
        img1_resized = img1.resize((target_width, int(img1.height * target_width / img1.width)))
        img2_resized = img2.resize((target_width, int(img2.height * target_width / img2.width)))

    # Stack the two rescaled images vertically.
    combined_image = Image.new("RGB", (target_width, img1_resized.height + img2_resized.height))
    combined_image.paste(img1_resized, (0, 0))
    combined_image.paste(img2_resized, (0, img1_resized.height))

    # Normalize the merged image back to the sum of the original heights
    # to keep the output size under control.
    combined_image = combined_image.resize((target_width, target_height))

    # Output name: stem of the first image + first 6 chars of the second stem.
    file_name1 = os.path.splitext(os.path.basename(image_path1))[0]
    file_name2 = os.path.splitext(os.path.basename(image_path2))[0]
    merged_file_name = f"{file_name1}_{file_name2[:6]}.jpg"

    output_path = os.path.join(local_image_dir, merged_file_name)

    # Save with the requested JPEG quality.
    combined_image.save(output_path, quality=quality)

    return output_path, merged_file_name


def jpg_to_img(jpg_path) -> dict:
    """Convert a JPG image file to a numpy array with its dimensions.

    Args:
        jpg_path (str): Path to the JPG file.

    Returns:
        dict: {'img': numpy array, 'width': int, 'height': int}
    """
    # Use a context manager so the underlying file handle is closed
    # (the original left the file open).
    with Image.open(jpg_path) as img_pil:
        # np.array copies the pixel data, so it stays valid after close.
        img_array = np.array(img_pil)
        width, height = img_pil.size

    return {'img': img_array, 'width': width, 'height': height}

def tabel_processing(pdf_info: list,
                     model,
                     local_image_dir):
    """Merge tables that are split across adjacent pages.

    For every adjacent page pair where the last block of the earlier page and
    the first block of the later page are both tables (with ``table_body``
    sub-blocks), the two table images are stitched vertically, re-recognized
    with *model*, and the result replaces the earlier fragment while the later
    fragment is blanked out.

    Args:
        pdf_info: List of page dicts (MinerU 'pdf_info' structure);
            mutated in place.
        model: Table recognizer exposing ``predict(img)``; the result is
            indexed as ``result[0]`` (HTML) and ``result[-1]`` (elapsed
            time) — assumed tuple-like, TODO confirm.
        local_image_dir: Directory holding the extracted block images.

    Returns:
        list: The (possibly shortened) pdf_info list.
    """
    id = 0
    # `id < len(...) - 1` terminates correctly for an empty list and after
    # pop() shrinks the list. (The original `while 1` with an equality break
    # raised IndexError on empty input and could run past the end once a
    # pop() made `id` overshoot len-1.)
    while id < len(pdf_info) - 1:
        # Examine each pair of adjacent pages.
        pre_page, next_page = pdf_info[id], pdf_info[id + 1]
        # Skip pages that have at most one content element.
        if len(pre_page['para_blocks']) <= 1 or len(next_page['para_blocks']) <= 1:
            logger.warning(f"id={id},内容元素个数={len(pre_page['para_blocks'])}；id={id+1},内容元素个数={len(next_page['para_blocks'])}")
            id += 1
            continue
        # Last block of the earlier page, first block of the later page.
        pre_page_end_block = pre_page['para_blocks'][-1]
        next_page_first_block = next_page['para_blocks'][0]
        flag = 0
        # Only act when both boundary blocks are tables.
        if pre_page_end_block['type'] == next_page_first_block['type'] == 'table':
            pre_page_two_block = pre_page_end_block['blocks']
            next_page_two_block = next_page_first_block['blocks']
            # ...and both second-level blocks are table bodies.
            if pre_page_two_block[-1]['type'] == next_page_two_block[0]['type'] == 'table_body':
                # Image names of the two table fragments.
                pre_img_name = pre_page_two_block[-1]['lines'][0]['spans'][0]['image_path']
                next_img_name = next_page_two_block[0]['lines'][0]['spans'][0]['image_path']
                # Stitch the fragments into one image; (None, None) means the
                # combined image would be too tall — skip this pair.
                new_img_path, new_img_name = merge_images_vertically(pre_img_name, next_img_name, local_image_dir)
                if not new_img_name:
                    id += 1
                    continue
                # Load the merged image as a numpy array.
                img_dict = jpg_to_img(new_img_path)
                img = img_dict['img']
                # Re-run table recognition on the merged image.
                logger.info("开始进行小跨页表格识别，处理图像：{}", new_img_path)
                result = model.predict(img)
                if result:
                    logger.info("表格识别成功，HTML代码生成完毕。")
                    logger.info("time: {} 秒", result[-1])
                else:
                    logger.warning("表格识别失败，未能生成有效的结果。")
                # Drop rows duplicating the header in the merged table.
                table_html = process_html_table(result[0])
                # Point the earlier fragment at the merged image/html ...
                pre_page_two_block[-1]['lines'][0]['spans'][0]['image_path'] = new_img_name
                pre_page_two_block[-1]['lines'][0]['spans'][0]['html'] = table_html
                # ... and blank out the later fragment.
                next_page_two_block[0]['lines'][0]['spans'][0]['image_path'] = ''
                next_page_two_block[0]['lines'][0]['spans'][0]['html'] = ''
                flag = 1
        id += 1
        # Drop the later page entirely when the merged-away table was its
        # only content.
        if flag == 1:
            # NOTE(review): the `<= 1` guard above implies para_blocks has at
            # least 2 entries whenever flag == 1, so this branch appears
            # unreachable — confirm whether the guard or this condition is
            # the intended one.
            if len(next_page['para_blocks']) == 1:
                if len(next_page_two_block) == 1:
                    pdf_info.pop(id)

    return pdf_info


def table_pipeline_change(
    pipe_res: dict,
    local_image_dir,
    ocr: bool = False,
    show_log: bool = True,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
) -> dict:
    """Run the cross-page table merge pass over a pipeline result.

    Args:
        pipe_res: Pipeline result dict containing a 'pdf_info' page list;
            mutated in place.
        local_image_dir: Directory holding the extracted block images.
        ocr, show_log, lang, layout_model, formula_enable, table_enable:
            Currently unused; kept for interface compatibility with the
            previous ModelSingleton-based code path.

    Returns:
        dict: The same *pipe_res* with its 'pdf_info' pages merged.
        (The original annotated the return as PipeResult, but the function
        returns the input dict.)
    """
    # A standalone RapidTable model is instantiated directly instead of
    # going through the full ModelSingleton manager.
    custom_model = RapidTableModel(None)

    # Merge tables that were split across page boundaries.
    pipe_res['pdf_info'] = tabel_processing(pipe_res['pdf_info'], custom_model, local_image_dir)
    return pipe_res

   

if __name__ == '__main__':
    # NOTE(review): this call looks broken — it passes an image path string
    # where table_pipeline_change expects a pipeline-result dict (pipe_res),
    # and it omits the required local_image_dir argument, so it would raise
    # a TypeError at runtime. Confirm the intended smoke-test invocation.
    table_pipeline_change('/home/fengjie/MinerU/demo/output/images/1b3844585ada07265c38bcbc49b34473e7308f423a799083fe4f14abbea8078c.jpg')







