import argparse
import os
import openai
import time
from transformers import AutoModel, AutoTokenizer
import json
import docx

from docx.document import Document
from docx.text.paragraph import Paragraph
from docx.table import _Cell, Table
from docx.oxml.text.paragraph import CT_P
from docx.oxml.table import CT_Tbl
import logging
import time
import traceback

# def set_api():
#     openai.api_base = "http://localhost:8000/v1"
#     openai.api_key = "none"

# def generate_sth(requirements_document_text):
#     response = openai.ChatCompletion.create(model="chatglm2-6b-32k",
#                                             messages=[{"role": "user", "content": requirements_document_text}])
#     ret = response["choices"][0]["message"]["content"]
#     return ret


def docx2txt_iter_block_items(parent):
    """
    Yield each paragraph and table child of *parent*, in document order.

    Every yielded value is either a Paragraph or a Table instance. *parent*
    is most commonly a main Document object, but a _Cell also works, since
    a cell can itself contain paragraphs and tables.

    Raises ValueError for any other parent type.
    """
    if isinstance(parent, Document):
        container = parent.element.body
    elif isinstance(parent, _Cell):
        container = parent._tc
    else:
        raise ValueError("something's not right")

    # Walk the raw XML children and wrap each one in its python-docx proxy.
    for node in container.iterchildren():
        if isinstance(node, CT_Tbl):
            yield Table(node, parent)
        elif isinstance(node, CT_P):
            yield Paragraph(node, parent)

def docx2txt_get_text_from_table(table):
    """
    Convert a table into a list of row dicts keyed by the header row.

    The first row supplies the column headers; each following row becomes a
    dict mapping header -> cell text, with empty cells omitted.

    :param table: a table object exposing .rows, each with .cells of .text
    :return: list of dicts, one per data row
    """
    table_headers = [cell.text for cell in table.rows[0].cells]
    print("table_headers", table_headers)
    return [
        {
            header: cell.text
            for header, cell in zip(table_headers, row.cells)
            if cell.text != ""
        }
        for row in table.rows[1:]
    ]

def pretreat_docx2txt(input_path: str, temp_path: str):
    """
    Pre-process a .docx file into a plain-text .txt file.

    Walks the document's paragraphs and tables in document order. Tables are
    serialized as JSON (a list of header->cell dicts, with double quotes
    swapped for single quotes); Heading/Normal paragraphs are kept as plain
    text lines. The document's base name (without extension) becomes the
    first line of the output.

    :param input_path: path of the source .docx file
    :param temp_path: path of the .txt file to write (UTF-8)
    """
    input_name = os.path.basename(input_path)
    doc = docx.Document(input_path)

    # Collect fragments and join once at the end instead of repeated string
    # concatenation (which is quadratic).
    parts = [input_name.split(".docx")[0] + '\n']

    # Iterate paragraphs and tables in document order.
    for block in docx2txt_iter_block_items(doc):
        if isinstance(block, Table):
            # Bug fix: the original matched block.style.name == 'Table Grid',
            # which skipped tables styled any other way — and a table whose
            # style name contains "Normal" would even fall into the paragraph
            # branch and crash on block.text. Detecting by type covers every
            # table regardless of style.
            table_txts = docx2txt_get_text_from_table(block)
            json_data = json.dumps(table_txts, ensure_ascii=False).replace("\"", "\'")
            parts.append(json_data)
        elif "Heading" in block.style.name or 'Normal' in block.style.name:
            parts.append(block.text + "\n")

    # Save the extracted text (and table JSON) as a .txt file.
    with open(temp_path, 'w', encoding='utf-8') as f:
        f.write("".join(parts))
    print(f"将 docx 前处理成 txt 的文件已经写入到：{temp_path}")
    logging.info(f"将 docx 前处理成 txt 的文件已经写入到：{temp_path}")


def add_heading_numbers(markdown_text_lines):
    """
    Prefix markdown heading lines with hierarchical section numbers.

    "# Title" becomes "# 1 Title"; a following "## Sub" becomes "## 1.1 Sub",
    and so on. Counters for deeper levels reset whenever a shallower heading
    appears. Non-heading lines are passed through unchanged.

    :param markdown_text_lines: iterable of lines (trailing newlines kept)
    :return: list of lines with numbered headings
    """
    result = []
    # One counter per heading depth; markdown defines at most 6 levels.
    heading_levels = [0, 0, 0, 0, 0, 0]

    for line in markdown_text_lines:
        if line.startswith("#"):
            # Bug fix: count only the LEADING '#' characters. The previous
            # line.count("#") also counted '#' appearing later in the text,
            # which inflated the level for lines like "# A # B".
            level = len(line) - len(line.lstrip("#"))
            # Clamp to the supported depth so a malformed run of 7+ '#'
            # cannot index past the counter list (the old code raised
            # IndexError there).
            level = min(level, len(heading_levels))

            # A heading at this depth restarts all deeper counters.
            for i in range(level, len(heading_levels)):
                heading_levels[i] = 0
            heading_levels[level - 1] += 1

            number = '.'.join(map(str, heading_levels[:level]))
            result.append(f"{'#' * level} {number} {line.lstrip('# ')}")
        else:
            result.append(line)

    return result

def posttreat_handle_markdown(input_path, output_path):
    """
    Post-process a generated markdown file by numbering its headings.

    Reads *input_path*, runs add_heading_numbers over its lines, and writes
    the numbered result to *output_path* (UTF-8 both ways).
    """
    with open(input_path, "r", encoding="utf-8") as src:
        lines = src.readlines()

    numbered = add_heading_numbers(lines)

    with open(output_path, "w", encoding="utf-8") as dst:
        dst.write(''.join(numbered))
        
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_path', default='内部供应链分册_质检管理.docx', type=str)

    parser.add_argument('--input_path_prefix', default='../docx/', type=str)  # uploaded input .docx documents
    parser.add_argument('--input_temp_path_prefix', default='../input_pretreat_temp/', type=str)  # .txt produced by docx pre-treatment
    parser.add_argument('--output_temp_path_prefix', default='../output_posttreat_temp/', type=str)  # raw model output, before post-treatment
    parser.add_argument('--output_path_prefix', default='../md/', type=str)  # final post-treated .txt output
    args = parser.parse_args()

    save_path = "/home/hj2/docx2be/log"
    # Bug fix: the log directory was never created, so logging.basicConfig
    # raised FileNotFoundError on a fresh machine.
    os.makedirs(save_path, exist_ok=True)

    # Log file name: <timestamp>-<script name>.log
    timestamp = int(time.time())
    file_name = os.path.basename(__file__).split(".")[0]
    log_file_name = os.path.join(save_path, f"{timestamp}-{file_name}.log")

    logging.basicConfig(filename=log_file_name, level=logging.DEBUG,
                        format='%(asctime)s - %(levelname)s - %(message)s')

    try:
        # Model checkpoint path and inference device.
        pt = './glm2-6b/ckpt/20231130-ckpt/1130'
        device = "cuda:0"

        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(args.input_temp_path_prefix, exist_ok=True)
        os.makedirs(args.output_path_prefix, exist_ok=True)
        os.makedirs(args.output_temp_path_prefix, exist_ok=True)

        # set_api()
        print("os.path.basename(args.file_path)", os.path.basename(args.file_path))
        if not os.path.basename(args.file_path).endswith('.docx'):
            print(f"'{args.file_path}', 并非.docx 文本文档，请检查，程序终止")
            logging.info(f"'{args.file_path}', 并非.docx 文本文档，请检查，程序终止")
            # Bug fix: exit() here returned status 0 on an error path;
            # SystemExit(1) signals failure (and, like exit(), is not caught
            # by the except Exception below).
            raise SystemExit(1)

        name = args.file_path.split('.docx')[0]

        # Input document path.
        input_docx_name = name + '.docx'
        input_docx_path = args.input_path_prefix + input_docx_name

        # Pre-treatment output path (.txt).
        input_temp_docx_name = name + '.txt'
        input_temp_docx_path = args.input_temp_path_prefix + input_temp_docx_name

        # Model output before heading numbering (.txt).
        output_temp_txt_name = name + '_生成设计 (未标号).txt'
        output_temp_txt_path = args.output_temp_path_prefix + output_temp_txt_name

        # Final output with numbered headings (.txt).
        output_txt_name = name + '_生成设计.txt'
        output_txt_path = args.output_path_prefix + output_txt_name

        # 1. Input pre-treatment: docx -> plain text.
        pretreat_docx2txt(input_docx_path, input_temp_docx_path)

        # 2. Model inference START
        # Bug fix: the temp file is written as UTF-8 by pretreat_docx2txt, so
        # read it back with an explicit encoding instead of the platform default.
        with open(input_temp_docx_path, "r", encoding="utf-8") as f:
            input_string = f.read()

        print("正在加载模型......")
        logging.info("正在加载模型......")
        tokenizer = AutoTokenizer.from_pretrained(pt, trust_remote_code=True)
        model = AutoModel.from_pretrained(pt, trust_remote_code=True).half().to(device)
        model.eval()

        print("正在生成......")
        logging.info("正在生成......")
        start = time.time()
        res, _ = model.chat(tokenizer, input_string)
        print('耗时：', time.time() - start)

        print("生成完成......")
        logging.info("生成完成......")
        duration_time = time.time() - start
        print(f"生成时间{duration_time}s")
        logging.info(f"生成时间{duration_time}s")
        # 2. Model inference END

        # 3. Post-treatment.
        # Save the intermediate result (headings not yet numbered).
        with open(output_temp_txt_path, 'w', encoding='utf-8') as f:
            f.write(res)
        print(f"中间结果保存至：'{output_temp_txt_path}'")
        logging.info(f"中间结果保存至：'{output_temp_txt_path}'")

        print("正在进行后处理步骤....")
        logging.info("正在进行后处理步骤....")
        posttreat_handle_markdown(output_temp_txt_path, output_txt_path)
        print(f"生成文档输出到：'{output_txt_path}'")
        logging.info(f"生成文档输出到：'{output_txt_path}'")
    except Exception:
        # Bug fix: traceback.print_exc() returns None, so the original
        # logging.info(traceback.print_exc()) wrote the literal "None" to the
        # log. logging.exception records the full traceback in the log file.
        logging.exception("处理过程中发生异常")