import os
import PyPDF2
import pandas as pd
import logging
import json
import pdb

def read_txt_file(path):
    """Read a UTF-8 text file and return its lines stripped of surrounding whitespace.

    Args:
        path: path of the text file on disk.

    Returns:
        list[str]: one entry per line, with leading/trailing whitespace removed.
    """
    with open(path, 'r', encoding="utf-8") as handle:
        return [line.strip() for line in handle]

"""
jaccard获得相似度
"""


def M_jaccard(s1, s2):
    """Compute the Jaccard similarity between the element sets of two iterables.

    Args:
        s1: first iterable (typically a string; compared character-wise).
        s2: second iterable.

    Returns:
        float in [0, 1]: |s1 ∩ s2| / |s1 ∪ s2|. Two empty inputs are
        considered identical and yield 1.0 (the previous implementation
        raised ZeroDivisionError in that case).
    """
    set1 = set(s1)
    set2 = set(s2)
    union = set1 | set2
    if not union:  # both inputs empty -> avoid division by zero
        return 1.0
    return 1.0 * len(set1 & set2) / len(union)


"""
通过jaccard获得相似度
利用滑动窗口找到text中与sentence相似度最大的一段
offset是设置的一个偏移量
"""


def get_similarity(sentence="", text=""):
    # sentence = "中成药单独使用或联合其他方法治疗 SOP 患者，在改善骨密度值、调节骨转换指标、降低骨折风险、减少疼痛、提高生活质量、改善中医临床症状方面的中医辨证应用、有效性和安全性如何？"
    # 要匹配的文本
    # text = "中成药单独使用或联合其他方法治疗 SOP 患者，在改善骨密度值、调节骨转换指标、降低骨折风险、减少疼痛、提高生活质量、改善中医临床症状方面的中医辨证应用、有效性和安全性如何？"
    window_size = len(sentence)
    offset = [0]  # , 10, 20, 40, 80, 100
    max_jac = 0
    best_match = None
    best_offset = None
    jaccard = None
    for i in offset:
        for j in range(len(text) - window_size + 1):
            if i + j < len(text) - 1:
                jaccard = M_jaccard(sentence, text[j:i + j + window_size])
                end = i + j + window_size
                begin = j
                while end + 1 < (len(text)) and text[end] not in ['?', '。', '!']:  # 以句子为单位不截断
                    end += 1
                while begin >= 0 and text[begin] not in ['?', '。', '!']:  # 以句子为单位不截断
                    begin -= 1
                window = text[begin + 1:end + 1]
                if jaccard > max_jac:
                    best_match = window
                    max_jac = jaccard
                    best_offset = i

    print(max_jac)
    print(best_offset)
    print(best_match)
    return max_jac, best_match


"""
从file_path获得pdf的内容
"""


def read_pdf_from_file(file_path):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        file_path: path to the PDF file on disk.

    Returns:
        str: the text of all pages, in page order.
    """
    with open(file_path, 'rb') as pdf_file:
        reader = PyPDF2.PdfReader(pdf_file)
        # accumulate page texts and join once instead of repeated +=
        pieces = [page.extract_text() for page in reader.pages]
        return "".join(pieces)


"""
从文件夹中获取所有指定类型的文件路径
"""


def get_allFile_from_folder(folder_path, file_type="pdf"):
    """Recursively list all files under `folder_path` ending in '.<file_type>'.

    Args:
        folder_path: root directory to walk.
        file_type: extension to match, without the leading dot (default "pdf").

    Returns:
        list[str]: full paths of every matching file, in os.walk order.
    """
    suffix = '.' + file_type
    matches = []
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        matches.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(suffix)
        )
    return matches


"""
获得星形结构，也就是entity与property的关联结构
通过entity的值进行分割
如果property=描述,value文本；否则property=其他内容，value为属性
"""


def get_star_structure(file_path):
    """Build a star structure (entity -> {property: value}) from an Excel sheet.

    Rows sharing the same entity are grouped together; when a property repeats
    for an entity, its values are concatenated with newlines. Only entities
    that carry a "描述" (description) property are kept. The surviving
    structure is also appended, one JSON object per line, to 'example002.json'.

    Args:
        file_path: path of the .xlsx file with 'entity'/'property'/'value' columns.

    Returns:
        dict: {entity: {property: value, ...}} restricted to described entities.
    """
    frame = pd.read_excel(file_path, engine='openpyxl')
    grouped = {}
    # group rows by entity, merging duplicate properties with newlines
    for entity, prop, value in zip(frame['entity'], frame['property'], frame['value']):
        props = grouped.setdefault(entity, {})
        if prop in props:
            props[prop] = str(props[prop]) + "\n" + str(value)
        else:
            props[prop] = value

    # keep only entities that have a description property
    described = {entity: props for entity, props in grouped.items() if "描述" in props}
    with open(r'example002.json', "a+", encoding="utf-8") as f:
        f.write(json.dumps(described, ensure_ascii=False) + "\n")

    return described


"""
替换描述字段
通过相似度匹配（get_similarity），匹配到pdf中的文本内容
"""


def replace_from_text(pdf_file_path, excel_file_path):
    """Replace each entity's "描述" field with the most similar PDF passage.

    Entities whose best match scores below 0.7 Jaccard similarity are
    discarded. The surviving structure is appended as one JSON line to
    'example003.json'.

    Args:
        pdf_file_path: path of the source guideline PDF.
        excel_file_path: path of the triples spreadsheet.

    Returns:
        dict: the star structure with descriptions replaced by PDF snippets.
    """
    raw = read_pdf_from_file(pdf_file_path).replace("\n", "")
    # round-trip through gb2312 to drop characters the codec cannot represent
    pdf_text = raw.encode('gb2312', 'ignore').decode('gb2312')
    star = get_star_structure(excel_file_path)
    # iterate over a snapshot of the keys so entries can be removed in-flight
    for entity in list(star):
        score, snippet = get_similarity(star[entity]["描述"], pdf_text)
        star[entity]["描述"] = snippet
        if score < 0.7:  # similarity too low -> drop the sample
            del star[entity]
    with open(r'example003.json', "a+", encoding="utf-8") as f:
        f.write(json.dumps(star, ensure_ascii=False) + "\n")
    return star


"""
生成训练文本，{ask="",answer=""}
"""


def get_train(pdf_file_path, excel_file_path):
    """Generate {ask, answer} training pairs from the cleaned star structure.

    For every entity, each non-description property becomes one QA pair whose
    question embeds the entity's matched description text. The pairs are
    appended as one JSON line to 'example004.json'.

    Args:
        pdf_file_path: path of the source guideline PDF.
        excel_file_path: path of the triples spreadsheet.
    """
    star = replace_from_text(pdf_file_path, excel_file_path)
    samples = []
    for props in star.values():
        context = props["描述"]
        for prop, value in props.items():
            if prop == "描述":
                continue
            samples.append({
                "ask": "根据文本内容:“" + str(context) + "”回答" + str(prop) + "是什么",
                "answer": str(value),
            })
    with open(r'example004.json', "a+", encoding="utf-8") as f:
        f.write(json.dumps(samples, ensure_ascii=False) + "\n")


if __name__ == '__main__':
    folder_path = r'指南知识图谱三元组提取'
    pdf_file_paths = get_allFile_from_folder(folder_path, file_type="pdf")
    excel_file_paths = get_allFile_from_folder(folder_path, file_type="xlsx")
    for pdf_file_path in pdf_file_paths:
        dirname = os.path.dirname(pdf_file_path)
        # Pair each PDF with the first .xlsx in the same directory.
        # Previously `[...][0]` raised IndexError when a folder held a PDF but
        # no spreadsheet; now such PDFs are skipped with a warning instead.
        excel_file_path = next(
            (p for p in excel_file_paths if os.path.dirname(p) == dirname), None
        )
        if excel_file_path is None:
            logging.warning("no .xlsx found next to %s; skipped", pdf_file_path)
            continue
        get_train(pdf_file_path, excel_file_path)


# def get_exampleLog():
#     logging.basicConfig(filename='example.log', level=logging.DEBUG, format='%(message)s')
#
#     # 定义要遍历的文件夹路径
#     folder_path = r'指南知识图谱三元组提取'
#     dfs = read_excel_from_folder(folder_path)
#     for df in dfs:
#         print(df)
#         entitys = df['entity']
#         propertys = df['property']
#         values = df['value']
#
#         for i, entity in enumerate(entitys):
#             # 找到所有的临床问题
#             if "临床问题" in str(entity) and propertys[i] == "描述":
#                 entity_id = entity.split('临床问题')[1]
#                 # 根据临床问题编号获得其他实体的描述
#                 dic = OrderedDict()
#                 for j, other_entity in enumerate(entitys):
#                     pattern = re.compile(r'[\u4e00-\u9fa5]' + entity_id + r'(-|\b)')
#                     match = pattern.search(str(other_entity))
#                     if match:
#                         if propertys[j] == "描述":
#                             pattern = re.compile(r'[\u4e00-\u9fa5]+')
#                             other_entity_name = pattern.findall(other_entity)[0]
#                             print(other_entity_name + ":")
#                             # logging.info(other_entity_name + ":")
#                             values[j] = values[j].replace("\n", "")
#                             print(values[j])
#                             # logging.info(values[j])
#                             if other_entity_name not in dic:
#                                 dic[other_entity_name] = values[j]
#                             else:
#                                 dic[other_entity_name] += "\n" + values[j]
#
#                 # 遍历字典中的所有键值对
#                 allStr = ""
#                 for key, value in dic.items():
#                     if allStr == "":
#                         allStr = key + ':\n' + value
#                     else:
#                         allStr = allStr + "\n" + key + ':\n' + value
#                 # allStr = allStr +"\n"+ "*" * 100
#
#                 for key, value in dic.items():
#                     wen_str="根据文本内容:\""+allStr+"\"\n"+"回答"+key+"是什么"
#                     ans_str=value
#                     logging.info("ask:\n"+wen_str)
#                     logging.info("answer:\n"+ans_str)
#                     logging.info("*" * 100)
#
#                 # logging.info('\n')
