import json
from pathlib import Path
from bs4 import BeautifulSoup
import re
import markdown
from api_doubao import doubao_chat
from langchain.docstore.document import Document
from dataStorage import to_database
import os
import pandas as pd

def process_dataframe():
    """Read file_map_int.xlsx and build the old->new article-name lists.

    Keeps only rows with file_type == 1 and duplicates == 0, then derives:
      * old_name: ``file_name`` with the ``.pdf`` suffix stripped
      * new_name: ``file_origin_local`` (backslashes replaced by ``_``)
        joined by ``_`` to ``file_name_origin`` (``.pdf`` stripped)

    Returns:
        tuple[list[str], list[str]]: (old_name, new_name), index-aligned.
    """
    df = pd.read_excel("file_map_int.xlsx")
    df = df[(df["file_type"] == 1) & (df["duplicates"] == 0)]
    # NOTE(review): the original also built an unused "new_pdf" column with a
    # different separator ('|' instead of '\\'); that dead computation is removed.
    origins = df["file_origin_local"].tolist()
    basenames = df["file_name_origin"].tolist()
    print(len(origins), len(basenames))
    new_name = [
        str(a).replace("\\", "_") + "_" + str(b).replace(".pdf", "")
        for a, b in zip(origins, basenames)
    ]
    old_name = [str(a).replace(".pdf", "") for a in df["file_name"].tolist()]
    return old_name, new_name

class ProcessMd(object):
    """Load a markdown file, convert it to HTML, and split the text into
    chunks of roughly <= 500 characters suitable for database storage."""

    def __init__(self, path):
        # path: filesystem path of the markdown file to process
        self.path = path

    def open_md(self):
        """Read the markdown file, strip bold markers, and return HTML.

        Returns:
            str: HTML rendered from the de-bolded markdown text.
        """
        with open(self.path, "r", encoding='utf-8') as file:
            markdown_text = file.read()
        # Strip **bold** markers
        markdown_text = re.sub(r'\*\*(.*?)\*\*', r'\1', markdown_text)
        # Strip __bold__ markers
        markdown_text = re.sub(r'__(.*?)__', r'\1', markdown_text)
        return markdown.markdown(markdown_text)

    @staticmethod
    def mll_split(line):
        """Ask the LLM to split an over-500-char paragraph into >= 2 parts.

        Falls back to ``[line]`` unchanged when the model reply is not the
        expected JSON (best-effort, never raises).
        """
        # Fixed duplicated character typo in the prompt ("切分成成" -> "切分成").
        prompt = ("文字段落如下：\n" + line
                  + "\n你的任务是按逻辑与语料关系，把上述超过500字的文字段落切分成至少2个段落。"
                    "输出格式为json，{\"切分后的段落\":[XXX,XXX]}")
        res = doubao_chat(prompt)
        try:
            r = json.loads(res)["切分后的段落"]
        except Exception as e:
            print(f"大模型切分文件出错：{e}")
            r = [line]
        return r

    @staticmethod
    def input_doc(text_list, article_name):
        """Wrap each text chunk in a langchain Document tagged with the
        originating article name."""
        return [
            Document(page_content=item, metadata={'org_article_name': article_name})
            for item in text_list
        ]

    @staticmethod
    def split_md(html):
        """Split HTML into deduplicated text chunks of roughly <= 500 chars.

        Tags are flattened to text (script/style/ul skipped). Table-like text
        (more than 10 '|' characters) and the final tag flush the running
        buffer; paragraphs over 500 chars are split via the LLM.

        Returns:
            list[str]: stripped, non-empty, order-preserving unique chunks.
        """
        soup = BeautifulSoup(html, 'html.parser')
        text_list = []
        temp = ""
        useful_tag = [tag.get_text(strip=True) for tag in soup.find_all()
                      if tag.name not in ['script', 'style', "ul"]]
        last_one_idx = len(useful_tag)
        for idx, tag_txt in enumerate(useful_tag):
            if idx + 1 == last_one_idx:
                # Last tag: flush whatever is buffered together with it.
                text_list.append(temp + tag_txt)
                temp = ""
            else:
                if len(tag_txt) == 0:
                    continue
                if tag_txt.count('|') > 10:
                    # Table-like row block: emit buffer + table as one chunk.
                    # Bug fix: the original never reset `temp` here, leaking
                    # buffered text into subsequent chunks.
                    text_list.append(temp + tag_txt)
                    temp = ""
                elif len(tag_txt) > 500:
                    # Over-long paragraph: flush the buffer (only if non-empty,
                    # original appended "" too) and add the LLM-split pieces.
                    if temp:
                        text_list.append(temp)
                    text_list.extend(ProcessMd.mll_split(tag_txt))
                    temp = ""
                elif len(tag_txt) + len(temp) <= 500:
                    if tag_txt in temp:
                        continue  # already buffered, skip duplicate fragment
                    temp = temp + "\n" + tag_txt
                else:
                    text_list.append(temp)
                    temp = tag_txt
        # Deduplicate while preserving document order: the original used set(),
        # which randomized chunk order and thus the stored split ids.
        deduped = list(dict.fromkeys(text_list))
        print(f"text_list:{len(text_list)},set_text_list = {len(deduped)}")
        # Drop chunks that are empty after stripping (original kept them).
        return [i.strip() for i in deduped if i.strip()]

def process_md():
    """Walk ./lxy_com for .md files, split each into chunks, and insert them
    into documents_split_text under their remapped article name.

    Files whose stem is absent from the old->new mapping, or whose mapped
    name was already inserted, are skipped.
    """
    done_rows = to_database("select distinct article_name from documents_split_text", ())
    # Set membership is O(1); the original scanned a list per file.
    done_names = {row[0] for row in done_rows}
    sql = ("insert into documents_split_text "
           "(article_name,article_split_id,article_split_text) values (%s,%s,%s)")
    directory = os.path.join(os.getcwd(), "lxy_com")
    old_name, new_name = process_dataframe()
    # Build the old-stem -> new-name mapping once, replacing the per-file
    # linear `in` check + `.index()` scan of the original.
    name_map = dict(zip(old_name, new_name))
    use_path = []
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".md"):
                use_path.append(os.path.join(root, filename))
    for ii, md_path in enumerate(use_path, start=1):
        print(f"正在处理第{ii}篇：文件地址为：{md_path}")
        md_name = Path(md_path).stem
        n_name = name_map.get(md_name)
        if n_name is None:
            continue  # no mapping for this file, skip it
        if n_name in done_names:
            print(f"该文章已经处理过")
            continue
        md = ProcessMd(md_path)
        text_list = md.split_md(md.open_md())
        for idx, item in enumerate(text_list):
            # article_split_id is 1-based
            to_database(sql, (n_name, idx + 1, item))
    print("done")


def process_md_v1():
    """Walk ./lxy_com for .md files, split each one, and insert the chunks
    into documents_split_text keyed by the file stem (in_or_out_file=0).

    Files whose stem already exists in the table are skipped.
    """
    done_rows = to_database("select distinct article_name from documents_split_text", ())
    # Set membership is O(1); the original scanned a list per file.
    done_names = {row[0] for row in done_rows}
    sql = ("insert into documents_split_text "
           "(article_name,article_split_id,article_split_text,in_or_out_file) "
           "values (%s,%s,%s,%s)")
    directory = os.path.join(os.getcwd(), "lxy_com")
    use_path = []
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".md"):
                use_path.append(os.path.join(root, filename))
    for ii, md_path in enumerate(use_path, start=1):
        print(f"正在处理第{ii}篇：文件地址为：{md_path}")
        md_name = Path(md_path).stem
        if md_name in done_names:
            print(f"该文章已经处理过")
            continue
        md = ProcessMd(md_path)
        text_list = md.split_md(md.open_md())
        for idx, item in enumerate(text_list):
            # article_split_id is 1-based; in_or_out_file fixed at 0 here
            to_database(sql, (md_name, idx + 1, item, 0))
    print("done")




if __name__ == '__main__':
    # Entry point: split all markdown files under ./lxy_com and store the
    # chunks. (Commented-out scratch code for ad-hoc single-file runs was
    # removed; use ProcessMd(path) directly for one-off debugging.)
    process_md_v1()
    print("ok")



