import os
import shutil
import uuid
from dataclasses import dataclass

from langchain.docstore.document import Document as document_langchain
from docx.document import Document
import docx
from docx import ImagePart

from docx.oxml import CT_P, CT_Tbl
from docx.table import _Cell, Table
from docx.text.paragraph import Paragraph
from langchain.text_splitter import CharacterTextSplitter



@dataclass
class ImageTextPart:
    """Pairs an embedded inline image with the text of its host paragraph."""

    # The docx image part; its binary content is reachable via `.blob`.
    images_part: ImagePart
    # Text of the paragraph that carries the image ("" when image-only).
    text: str = ""


# Fetch the image of this paragraph (a line can hold at most one image).
def get_ImagePart(graph: Paragraph, doc: Document):
    """Return the ImagePart embedded in *graph*, or None if there is no image."""
    for pic in graph._element.xpath('.//pic:pic'):  # every inline picture element
        for rel_id in pic.xpath('.//a:blip/@r:embed'):  # relationship id of the blob
            candidate = doc.part.related_parts[rel_id]  # resolve id -> document part
            if isinstance(candidate, ImagePart):
                return candidate
    return None


def is_image(graph: Paragraph, doc: Document):
    """Return True when the paragraph *graph* embeds at least one ImagePart."""
    # Previously duplicated get_ImagePart's xpath/lookup logic verbatim;
    # delegating keeps the image-resolution rules in a single place.
    return get_ImagePart(graph, doc) is not None


def read_table_clean(table):
    """Flatten *table* column-wise into a newline-joined string.

    The first row's cell of each column gets a trailing ':' (header), later
    cells of the same column are appended separated by ';'. Consecutive
    duplicate items within a column are collapsed (order preserved).
    Returns one line per column, joined with '\n'.
    """
    columns = {}
    for row in table.rows:
        for index, cell in enumerate(row.cells, start=1):
            key = f"content_{index}"
            value = cell.text.replace("\n", "").strip()
            if columns.get(key):
                columns[key] = columns[key] + ";" + value
            else:
                # First value seen for this column acts as its header.
                columns[key] = value + ":"

    # Deduplicate the ';'-separated items while keeping insertion order
    # (dict.fromkeys preserves order on Python 3.7+).
    for key, joined in columns.items():
        columns[key] = ";".join(dict.fromkeys(joined.split(";")))

    return "\n".join(columns.values())


def read_table(table):
    """Render *table* row-wise: the first row supplies the column headers and
    every later row becomes "<header>：<cell>" pairs run together; rows are
    joined with " \n "."""
    headers = {}
    rendered_rows = []
    for position, row in enumerate(table.rows, start=1):
        line = ""
        for col, cell in enumerate(row.cells, start=1):
            cleaned = cell.text.replace("\n", "").strip()
            if position == 1:
                headers["title_" + str(col)] = cleaned
            else:
                line = line + headers["title_" + str(col)] + "：" + cleaned
        # The header row contributes an empty entry, so the result starts
        # with the separator — kept intentionally for backward compatibility.
        rendered_rows.append(line)
    return " \n ".join(rendered_rows)
def read_table_text(table):
    """Render *table* as pipe-separated cell text, one entry per row, with
    rows joined by " \n "."""
    lines = []
    for row in table.rows:
        lines.append("|".join(cell.text for cell in row.cells))
    return " \n ".join(lines)

def read_table_string(table):
    """Render *table* as pipe-separated cell text per row, rows joined by " \n ".

    Kept as a thin alias for backward compatibility with existing callers.
    """
    # Body was a byte-for-byte duplicate of read_table_text; delegate instead.
    return read_table_text(table)


# Classify each top-level block of the parsed document.
def iter_block_items(parent):
    """Yield the block-level children of *parent* in document order.

    Each yielded value is a Paragraph, a Table, or an ImageTextPart (for a
    paragraph that embeds an image). *parent* is normally the main Document
    object, but a _Cell also works since cells hold paragraphs and tables.
    """
    if isinstance(parent, Document):
        body = parent.element.body
    elif isinstance(parent, _Cell):
        body = parent._tc
    else:
        raise ValueError("something's not right")

    for child in body.iterchildren():
        if isinstance(child, CT_Tbl):
            yield Table(child, parent)
        elif isinstance(child, CT_P):
            paragraph = Paragraph(child, parent)
            if not is_image(paragraph, parent):
                yield paragraph
            else:
                # Bundle the image part together with the paragraph's text.
                yield ImageTextPart(images_part=get_ImagePart(paragraph, parent),
                                    text=paragraph.text)


class DocxEntity:
    """One tagged fragment of parsed document content.

    data_type -- category tag consumed by the chunking pass (the parser
                 emits "text" and "table")
    data      -- the rendered string for this fragment
    """

    def __init__(self, data_type, data):
        self.data = data
        self.data_type = data_type


# Core document processing (single file).
def _split_by_newline(text: str, chunk_size: int):
    """Split *text* on '\n' into chunks of at most *chunk_size* characters
    using langchain's CharacterTextSplitter; blank chunks are dropped."""
    docs = [document_langchain(page_content=text)]
    text_splitter = CharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=0, separator='\n')
    doc_texts = text_splitter.split_documents(docs)
    return [d.page_content for d in doc_texts if d.page_content.strip()]


# Markers that the (currently stubbed) image-captioning call may return for a
# content-free image; such an image is then represented by its file path.
# NOTE(review): '是' means "yes" — presumably the model answers a yes/no
# question about the image; confirm against the disabled chat_zhipu_ai_images.
_BLANK_IMAGE_MARKERS = (
    "'是'",
    ''''"是"''',
    ''''""text": "是""''',
    '''"text": '是' ''',
)


def _extract_image(elem, temp_image_folder, result, docx_list):
    """Write the image blob of *elem* (an ImageTextPart) into
    *temp_image_folder* and record either a caption or the file path in
    *result* and *docx_list* (both mutated in place)."""
    file_path = temp_image_folder + "/" + uuid.uuid4().hex + ".jpg"
    with open(file_path, "wb") as img_file:
        img_file.write(elem.images_part.blob)

    try:
        # Stub caption; the real model call is currently disabled.
        text = ''''""text": "是""'''
        # text = chat_zhipu_ai_images(file_path)
        print("!!!!!!图片内容为" + text)
        is_blank = (len(text) < 5 and '是' in text) or any(
            marker in text for marker in _BLANK_IMAGE_MARKERS)
        if is_blank:
            result.append(file_path)
            docx_list.append(DocxEntity("text", file_path))
        else:
            result.append(text)
            docx_list.append(DocxEntity("text", text))
        docx_list.append(DocxEntity("text", elem.text))
    except Exception:
        # Best effort: when captioning fails, fall back to the file path.
        result.append(file_path)
        docx_list.append(DocxEntity("text", elem.text))
        docx_list.append(DocxEntity("text", file_path))


def parse_doc_by_one(doc_path: str, chunk_size=600):
    """Parse the .docx file at *doc_path* into a list of text chunks.

    Paragraph text is accumulated and split into chunks of at most
    *chunk_size* characters; each table is emitted as standalone chunks in
    two renderings; embedded images are dumped to ``temp_images/`` and
    represented by a caption or by their file path.

    Returns the list of chunk strings.
    """
    doc = docx.Document(doc_path)
    # Flat trace of everything parsed; currently not used by the return value.
    result = []
    # Folder for extracted images. Deliberately NOT removed afterwards:
    # the returned chunks may reference file paths inside it.
    temp_image_folder = "temp_images"
    os.makedirs(temp_image_folder, exist_ok=True)
    docx_list = []

    for elem in iter_block_items(doc):
        if isinstance(elem, Paragraph):
            result.append(elem.text)
            docx_list.append(DocxEntity("text", elem.text))
        elif isinstance(elem, Table):
            # Render each format once (previously read_table() was re-invoked
            # three times per table).
            table_text = read_table(elem)
            table_clean = read_table_clean(elem)
            table_raw = read_table_text(elem)
            result.append(table_text)
            print("!!!!!!表格内容为" + table_text)
            print("!!!!!!表格clean内容为" + table_clean)
            docx_list.append(DocxEntity("table", table_text + table_clean))
            docx_list.append(DocxEntity("table", table_raw))
            docx_list.append(DocxEntity("text", table_raw))
        elif isinstance(elem, ImageTextPart):
            _extract_image(elem, temp_image_folder, result, docx_list)

    # Re-chunk: running paragraph text accumulates in current_text and is
    # split on flush; "table"/"images" entries become standalone chunks.
    texts = []
    current_text = ""
    before_type = ""
    for entity in docx_list:
        if entity.data_type == "text":
            if before_type in ("", "text"):
                current_text = current_text + "\n" + entity.data
            else:
                # Directly after a table/image chunk the text is joined
                # WITHOUT a newline — preserved from the original behavior.
                current_text = current_text + entity.data
                if before_type == "images":
                    texts.append(entity.data)
            before_type = "text"
        elif entity.data_type in ("table", "images"):
            # NOTE(review): "images" entries are never produced by the loop
            # above (images are recorded as "text"); branch kept for safety.
            if before_type == "text":
                # Flush accumulated paragraph text before the table chunk.
                texts.extend(_split_by_newline(current_text, chunk_size))
                current_text = ""
            texts.append(entity.data)
            before_type = entity.data_type

    if current_text:
        texts.extend(_split_by_newline(current_text, chunk_size))

    return texts

# abc = parse_doc_by_one("C:\\Users\\Administrator\\Desktop\\高校兼职\\测试数据2\\北京市绿道系统专项规划（2023年-2035年）.docx")
# print(abc)
