import argparse
import logging
import os
import re
import subprocess
import time
import traceback
# from bs4 import BeautifulSoup
# import html2text
# import tqdm
# import html2markdown
from pdf2docx import Converter

from fastapi import FastAPI
# Single FastAPI application instance; the @app.post routes below attach to it.
app = FastAPI()
from pydantic import BaseModel
from datetime import datetime
import uvicorn

# Leading tokens used by split_txt to detect numbered section starts:
# row 0 holds Chinese numerals "一".."三十", row 1 the matching Arabic
# numerals "1".."30" (same index = same ordinal).
first_char_arr = [
    ["一","二","三","四","五","六","七","八","九","十","十一","十二","十三","十四","十五","十六","十七","十八","十九","二十","二十一","二十二","二十三","二十四","二十五","二十六","二十七","二十八","二十九","三十"],
    ["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"]
]
# Log errors only, tagging each record with timestamp, source file/line and level.
logging.basicConfig(level=logging.ERROR,format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Convert a PDF file into the corresponding .docx file.
def PdfToDocx(pdf_file,docx_file):
    """Convert *pdf_file* into *docx_file* using pdf2docx.Converter.

    pdf_file:  path of the source PDF.
    docx_file: path the generated .docx is written to.
    """
    cv = Converter(pdf_file)
    try:
        #cv.convert(docx_file , multi_processing=True)
        cv.convert(docx_file)
    finally:
        # Always release the underlying PDF handle; the original leaked it
        # whenever convert() raised.
        cv.close()

def get_substring_between_strings(string, substring1, substring2):
    """Return the text between the first *substring1* and the next
    *substring2*, plus the index just past that text.

    Returns ("", 0) when either marker is missing or the span between
    them is empty (same failure convention as the original).
    """
    pos1 = string.find(substring1)
    if pos1 == -1:
        # Previously a missing substring1 made find() return -1 and the
        # arithmetic below sliced garbage instead of signalling failure.
        return "", 0
    start_index = pos1 + len(substring1)
    rel = string[start_index:].find(substring2)
    if rel == -1:
        return "", 0
    end_index = rel + start_index
    if start_index < end_index:
        return string[start_index:end_index], end_index
    return "", 0

def deleteTxt(dir):
    """Recursively delete every .txt file found under *dir*."""
    # Collect first, then remove, so the walk never iterates a directory
    # while its entries are being deleted.
    doomed = [
        os.path.join(root, name)
        for root, _subdirs, names in os.walk(dir)
        for name in names
        if name.endswith(".txt")
    ]
    for path in doomed:
        os.remove(path)

def check_next_title(resulttxt, titles, i, index):
    """Validate the candidate position *index* of heading titles[i+1].

    The position is considered suspicious (e.g. a table-of-contents hit)
    when the previous heading appears in the 80 chars before it, or the
    following heading appears in chars [index+5, index+85).  For the very
    first heading (i == 0) only the look-ahead check applies.  Suspicious
    hits are re-searched after index+5 (which may yield -1); otherwise
    *index* is returned unchanged.
    """
    window_before = resulttxt[index - 80:index]
    window_after = resulttxt[index + 5:index + 85]
    suspicious = titles[i + 2] in window_after
    if i != 0:
        suspicious = suspicious or titles[i] in window_before
    if suspicious:
        return resulttxt.find(titles[i + 1], index + 5)
    return index

def split_txt(txt_arr, first_char, filename):
    """Split *txt_arr* (a list of lines) at lines that begin with the
    successive tokens of *first_char* (e.g. "一", "二", ... or "1", "2", ...).

    Returns a list of {"filename": ..., "content": ...} dicts: one chunk
    before each detected marker line and one final chunk from the last
    marker to the end.  If no marker is found, the whole input is returned
    as a single chunk (the original raised IndexError on flags[-1]).
    """
    flags = []
    flag = 0
    for i, line in enumerate(txt_arr):
        # Guard flag against running past first_char: the original indexed
        # first_char[flag] unconditionally and crashed once every token
        # had been consumed.
        if flag < len(first_char) and line.startswith(first_char[flag]):
            flags.append({"flag": flag, "index": i})
            flag += 1
    if not flags:
        # No numbered section detected: return everything as chunk 0.
        return [{"filename": filename + "0", "content": txt_arr}]
    jsons_obj = []
    fi = 0
    for marker in flags:
        jsons_obj.append({
            "filename": filename + str(marker["flag"]),
            "content": txt_arr[fi:marker["index"]]
        })
        fi = marker["index"]
    last = flags[-1]
    jsons_obj.append({
        "filename": filename + str(last["flag"] + 1),
        "content": txt_arr[last["index"]:]
    })
    return jsons_obj

# Read a .docx file through pandoc and turn it into chapter-split .txt files.
def readDoxToTxt(docx_file,txt_dir,file_name,file_type):
    """Convert *docx_file* to plain text with pandoc, clean it, and split
    it into per-chapter files under *txt_dir*.

    docx_file: path of the document pandoc reads.
    txt_dir:   output directory; receives "<file_name>.txt" (full cleaned
               text) plus one "<i>.txt" per detected chapter.
    file_name: base name for the full-text dump.
    file_type: 0 -> chapters headed "第N章"; anything else -> "N、".
    Raises subprocess.CalledProcessError if pandoc exits non-zero (check=True).
    """
    pandoc_command2 = ['pandoc', docx_file, '-t', 'plain']
    result = subprocess.run(pandoc_command2, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, encoding='utf-8', check=True)
    resulttxt = result.stdout
    # Drop pandoc artifacts: empty link brackets and table placeholders.
    resulttxt = resulttxt.replace("[]","").replace("[TABLE]","")
    # Remove page numbers (lines holding only digits, or "-第N页-" lines).
    resulttxt = re.sub(r'\n\d+\n', '', resulttxt)
    resulttxt = re.sub(r'\n-第\d+页-\n', '', resulttxt)
    # Strip every line and discard blank lines.
    resulttxt = "\n".join([item.strip() for item in resulttxt.split("\n") if item])
    # Save the raw cleaned conversion output.
    with open(os.path.join(txt_dir,file_name+".txt"), 'w', encoding='utf-8') as f:
        f.write(resulttxt)
    # Try to strip a table of contents: if "目录" (or "目 录") appears in the
    # first 1000 characters, cut everything before the second occurrence of
    # the first chapter heading (the first occurrence is the ToC entry).
    mltxt = resulttxt[0:1000]
    flag = mltxt.find("目录")
    flag2 = mltxt.find("目 录")
    if flag != -1 or flag2 != -1:
        if file_type == 0:
            i1 = resulttxt.find("第一章")
            i2 = resulttxt.find("第一章",i1+5)
        else:
            i1 = resulttxt.find("一、")
            i2 = resulttxt.find("一、", i1 + 5)
        if i1 != -1 and i2 != -1:
            # NOTE(review): i2-3 backs up three characters before the second
            # heading, presumably to keep the preceding newline/prefix —
            # confirm against real pandoc output.
            resulttxt = resulttxt[i2-3:]

    # Split by chapter headings. A heading must start its own line, and the
    # 80 chars around the next heading must not contain the previous/next
    # heading names (see check_next_title).
    tit1 = ["第一章","\n第二章","\n第三章","\n第四章","\n第五章","\n第六章","\n第七章","\n第八章","\n第九章","\n第十章","\n第十一章","\n第十二章","\n第十三章","\n第十四章","\n第十五章","\n第十六章","\n第十七章","\n第十八章","\n第十九章","\n第二十章","\n第二十一章","\n第二十二章","\n第二十三章","\n第二十四章"]
    tit2 = ['一、', '\n二、', '\n三、', '\n四、', '\n五、', '\n六、', '\n七、', '\n八、', '\n九、', '\n十、', '\n十一、',
          '\n十二、', '\n十三、', '\n十四、', '\n十五、', '\n十六、', '\n十七、', '\n十八、', '\n十九、', '\n二十、',
          '\n二十一、', '\n二十二、', '\n二十三、', '\n二十四、', '\n二十五、', '\n二十六、', '\n二十七、', '\n二十八、',
          '\n二十九、', '\n三十、']
    if file_type == 0:
        titles = tit1
    else:
        titles = tit2
    list_ctxt = []
    for i in range(len(titles)-1):
        # Locate the first occurrence of the next chapter heading.
        index = resulttxt.find(titles[i+1], 0)
        # Not found (or no further headings to look ahead at): treat the
        # remainder as the final chapter and stop.
        if index == -1 or i+2 > len(titles)-1:
            list_ctxt.append(resulttxt)
            break
        # Validate the candidate position; if it looks like a ToC hit,
        # check_next_title re-searches and may return -1, in which case
        # this chapter is silently skipped (no split performed).
        cindex = check_next_title(resulttxt,titles,i,index)
        if cindex != -1:
            list_ctxt.append(resulttxt[0:cindex])
            resulttxt = resulttxt[cindex+1:]
    # Write each chapter chunk to its own numbered file; the commented-out
    # code below is a paused second-level split of each chapter.
    hebing = ""
    for i in range(len(list_ctxt)):
        line = list_ctxt[i]
        with open(os.path.join(txt_dir, str(i) + ".txt"), 'w',encoding='utf-8') as f:
            f.write(line)
        # ctlist = [item.strip() for item in line.split("\n") if item]
        # Second-level split would happen here.
        # with open(os.path.join(txt_dir, file_name + "-" + titles[i].replace("\n","") +".txt"),'w',encoding='utf-8') as f:
        #     f.write(line)
    # Second-level split paused.
    #     for ct in ctlist:
    #         if ct.startswith("1"):
    #             jsonsobj = split_txt(ctlist, first_char_arr[1] , file_name + "-" + titles[i].replace("\n",""))
    #             break
    #         elif ct.startswith("一"):
    #             jsonsobj = split_txt(ctlist, first_char_arr[0] , file_name + "-" + titles[i].replace("\n",""))
    #             break
    #     for jonsx in jsonsobj:
    #         with open(os.path.join(txt_dir, jonsx["filename"] + ".txt"), 'w',encoding='utf-8') as f:
    #             hebing += "\n".join(jonsx["content"])
    #             hebing += "\n&&&&&\n"
    #             f.write("\n".join(jonsx["content"]))
    # with open(os.path.join(txt_dir, "hebing.txt"), 'w',encoding='utf-8') as f:
    #     f.write(hebing)

# Collect the paths of every file under a directory tree.
def find_files(directory):
    """Recursively gather the full path of every file below *directory*,
    print how many were found, and return them as a list."""
    collected = []
    for root, _subdirs, names in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in names)

    print("找到文件数：", len(collected))
    return collected

def create_directory_if_not_exists(directory):
    """Create *directory* (including parents) if missing; no-op when it
    already exists as a directory."""
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(directory, exist_ok=True)


class MyClassModel(BaseModel):
    """Request body for POST /exe."""
    filepath: str  # path of the input document (.pdf, .docx or .doc)
    filename: str  # base name used for the output txt directory and files
    filetype: int  # 0 -> chapters headed "第N章"; otherwise "N、" headings

@app.post("/exe")
async def exe(myclass: MyClassModel):
    """Convert the document described by *myclass* into chapter-split txt
    files under the data directory.

    Returns 1 on success, 0 on any failure (traceback is printed).
    """
    try:
        # deleteTxt("./招标文件/")
        start_time = time.time()

        files_path = myclass.filepath
        file_name = myclass.filename
        file_type = myclass.filetype

        # NOTE(review): output root is hard-coded to this host's data dir.
        txt_dir = os.path.join("/home/linweibin/liujian/project/policy-tender/data", file_name)
        create_directory_if_not_exists(txt_dir)

        if files_path.endswith('.pdf'):
            # Swap only the final extension; the previous
            # files_path.replace('.pdf', '.docx') also rewrote any ".pdf"
            # occurring earlier in the path.
            docx_file = os.path.splitext(files_path)[0] + '.docx'
            PdfToDocx(files_path, docx_file)
            readDoxToTxt(docx_file, txt_dir, file_name, file_type)
        elif files_path.endswith('.docx'):
            readDoxToTxt(files_path, txt_dir, file_name, file_type)
        elif files_path.endswith('.doc'):
            # pandoc may reject legacy .doc input; report instead of crashing.
            try:
                readDoxToTxt(files_path, txt_dir, file_name, file_type)
            except subprocess.CalledProcessError as e:
                print(f"Error converting file: {e}")

        end_time = time.time()
        print("执行耗时：", end_time - start_time, "秒")
    except Exception as e:
        # Top-level boundary handler: log the traceback, report failure as 0.
        formatted_exc = traceback.format_exc()
        print("堆栈跟踪:")
        print(formatted_exc)
        return 0

    return 1


# Serve the API with uvicorn when this module is executed directly.
if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=19308)