import sys
sys.path.append('/home/FAST_DATA_MIRROR/Langchain-Chatchat-master')
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from typing import List
from tabel_and_images.read_image import parse_image_llava
import tqdm


class RapidOCRDocLoader(UnstructuredFileLoader):
    """Load a ``.docx`` file for unstructured partitioning.

    Walks the document body in order, collecting:
      * paragraph text,
      * OCR text (RapidOCR) of images embedded in paragraphs,
      * table cell text, each table paired with the preceding document
        text as context (used to build an LLM prompt; currently the LLM
        call is disabled).

    The combined text is split into elements via unstructured's
    ``partition_text``.
    """

    def _get_elements(self) -> List:
        def doc2text(filepath: str) -> str:
            """Return all extracted text of the .docx at *filepath* as one string."""
            # Heavy third-party deps are imported lazily so the loader can be
            # registered without them installed.
            from docx.table import _Cell, Table
            from docx.oxml.table import CT_Tbl
            from docx.oxml.text.paragraph import CT_P
            from docx.text.paragraph import Paragraph
            from docx import Document, ImagePart
            from PIL import Image
            from io import BytesIO
            import numpy as np
            from rapidocr_onnxruntime import RapidOCR

            ocr = RapidOCR()
            doc = Document(filepath)
            resp = ""

            def iter_block_items(parent):
                """Yield Paragraph and Table children of *parent* in document order."""
                from docx.document import Document
                if isinstance(parent, Document):
                    parent_elm = parent.element.body
                elif isinstance(parent, _Cell):
                    parent_elm = parent._tc
                else:
                    raise ValueError("RapidOCRDocLoader parse fail")

                for child in parent_elm.iterchildren():
                    if isinstance(child, CT_P):
                        yield Paragraph(child, parent)
                    elif isinstance(child, CT_Tbl):
                        yield Table(child, parent)

            # Top-level paragraphs + tables is exactly what
            # iter_block_items(doc) yields, so the total matches.
            b_unit = tqdm.tqdm(total=len(doc.paragraphs) + len(doc.tables),
                               desc="RapidOCRDocLoader block index: 0")
            text_context, table_contexts = [], []
            for i, block in enumerate(iter_block_items(doc)):
                b_unit.set_description(
                    "RapidOCRDocLoader  block index: {}".format(i))
                b_unit.refresh()
                if isinstance(block, Paragraph):
                    resp += block.text.strip() + "\n"
                    text_context.append(block.text.strip())
                    # All inline pictures of this paragraph.
                    pics = block._element.xpath('.//pic:pic')
                    for pic in pics:
                        # Relationship ids of the embedded image blobs.
                        for img_id in pic.xpath('.//a:blip/@r:embed'):
                            part = doc.part.related_parts[img_id]
                            # BUGFIX: was '\n'.join(tabel_context[-100:]),
                            # which raised NameError before the first table;
                            # the surrounding *text* is the intended context
                            # for the (currently disabled) LLM prompt below.
                            image_context = '\n'.join(text_context[-100:])
                            if isinstance(part, ImagePart):
                                # BUGFIX: do not shadow the xpath loop var.
                                img = Image.open(BytesIO(part._blob))
                                # LLM-based image description (disabled):
                                # prompt = f'''
                                #     图片上下文: {image_context},
                                #     请根据图片上下文,生成一个符合逻辑和上下文语境的图片描述。
                                #     图片描述:
                                # '''
                                # image_text = parse_image_llava(img)
                                # resp += f"\n{image_text}"
                                # OCR path:
                                result, _ = ocr(np.array(img))
                                if result:
                                    ocr_result = [line[1] for line in result]
                                    resp += "\n".join(ocr_result)
                elif isinstance(block, Table):
                    table_context = []
                    for row in block.rows:
                        for cell in row.cells:
                            for paragraph in cell.paragraphs:
                                table_context.append(paragraph.text.strip())
                    # BUGFIX: was '\n'.join(tabel_context[-100:]) — the
                    # table's own cells; the surrounding document text is
                    # the intended "table context" consumed below.
                    table_context.append('\n'.join(text_context[-100:]))
                    table_contexts.append(table_context)
                b_unit.update(1)
            b_unit.close()

            # Build an LLM prompt per table (the parse_image_llava call is
            # currently disabled, so this loop only prepares the prompt).
            for table_context in table_contexts:
                table_text = "\n".join(table_context[:-1])
                # BUGFIX: was "\n".join(tabel_context[-1]), which joined the
                # context *string* character by character (and clobbered the
                # loop variable); the last element already is the joined
                # context string.
                surrounding_text = table_context[-1]
                prompt = f'''
                    表格内容: {table_text},
                    表格上下文: {surrounding_text},
                    请根据表格内容和表格的上下文,生成一个符合逻辑和上下文语境的表格的所有信息。
                    表格信息:
                '''
                # text_llava = parse_image_llava(img, message=prompt)
                # text_llava = '\n' + '-'*100 + '\n:' + text_llava + '-'*100
                # resp = resp + "\n" + text_llava
            return resp

        text = doc2text(self.file_path)
        from unstructured.partition.text import partition_text
        return partition_text(text=text, **self.unstructured_kwargs)


if __name__ == '__main__':
    # Quick manual smoke test against a sample document.
    sample_path = "../tests/samples/ocr_test.docx"
    print(RapidOCRDocLoader(file_path=sample_path).load())
