# -*- coding: utf-8 -*-
# @Time : 2022/8/30
# @Author : LuCool
# @File : Similarity_compare.py
# @Software : PyCharm
# Import third-party libraries
import os
import re
import time
import pandas as pd
from pprint import pprint
import pdfplumber
import jieba
import codecs
import numpy as np
from collections import Counter
import jieba.analyse
import shutil
from datetime import datetime
from sklearn.metrics.pairwise import cosine_similarity
import ast
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer


def filter_data(database):
    """
    Keep only database rows whose (date, IB, issuer) triple appears in
    the PDF file names found under the module-level ``pdf_path``.

    Parameters:
        database: DataFrame with "release_date", "IB" and "FIRM" columns.
    Returns:
        Filtered DataFrame containing only rows with a matching file.
    """
    path_list = os.listdir(pdf_path)
    info_list = []
    # File names are assumed to look like "YYYYMMDD-IB-ISSUER-..." --
    # TODO(review): confirm against the actual naming convention.
    for file in path_list:
        try:
            date = re.findall(r"\d{8}", file)[0]
        except IndexError:
            date = "None"
        try:
            ib = re.findall(r"\d{8}-(.*?)-", file, re.DOTALL)[0]
        except IndexError:
            ib = "None"
        try:
            # BUG FIX: the old pattern "-.*?-(.*?)" ended in a lazy group,
            # which always matched the empty string, so issuer was always
            # "".  Capture the third dash-separated field instead.
            issuer = re.findall(r"\d{8}-[^-]*-([^-]+)", file)[0]
        except IndexError:
            issuer = "None"

        info_list.append({
            "date": date,
            "ib": ib,
            "issuer": issuer,
        })

    # Column-wise lists feeding the isin() filters below.
    date_list = [i["date"] for i in info_list]
    ib_list = [i["ib"] for i in info_list]
    issuer_list = [i["issuer"] for i in info_list]

    # Keep rows whose date, IB and issuer each have a matching file.
    filter_rst = database[(database["release_date"].isin(
        date_list)) & database["IB"].isin(ib_list) & database["FIRM"].isin(issuer_list)]

    return filter_rst


def vectorized_data(key_info, synonyms_lists, stopwords, vector1=None, nums_in="without_number_txt"):
    """
    Tokenize one report's txt file and build its word-frequency vector,
    merging synonyms (preferring spellings already present in ``vector1``).

    Parameters:
        key_info: "FIRM|IB|date" key identifying which txt file to read.
        synonyms_lists: list of synonym groups (each a list of words).
            NOTE(review): groups are mutated in place below
            (``synonym_list.remove``), so they shrink across calls --
            confirm this is intended.
        stopwords: words dropped from the token stream.
        vector1: word list of the document being compared against; used to
            pick which synonym spelling absorbs the merged frequency.
        nums_in: subfolder of TXT to read from --
            "with_number_txt": txt with digits kept
            "without_number_txt": txt with digits stripped
    Returns:
        word_counter: Counter of word -> frequency after synonym merging.
    """
    # BUG FIX: the default used to be a shared mutable list (vector1=[]).
    if vector1 is None:
        vector1 = []
    # Flatten the synonym groups for quick membership tests.
    synonyms_list_merge = [word for words in synonyms_lists for word in words]

    txt_content = []
    # Locate the txt file exported from the PDF by trans_txt.
    txt_path = os.path.join(root_directory, "TXT", nums_in,
                            fr"{'_'.join(key_info.split('|'))}.txt")
    # Try UTF-8 first, fall back to GBK (trans_txt writes GBK).
    try:
        with codecs.open(txt_path, "r", encoding="utf-8") as f:
            txt = f.read()
    except UnicodeDecodeError:
        with codecs.open(txt_path, "r", encoding="gbk") as f:
            txt = f.read()
    # NOTE(review): for "with_number_txt" txt_content stays empty and txt
    # becomes "" below -- confirm that branch is still unused.
    if nums_in == "without_number_txt":
        texts = txt.split("\r\n")
        for text in texts:
            # Keep only Chinese characters and whitespace.
            # BUG FIX: re.sub's 4th positional arg is `count`, not flags;
            # passing re.DOTALL (==16) capped replacement at 16 matches.
            text = re.sub(r"[^\u4e00-\u9fa5\s]+", " ", text)
            txt_content.append(text)
    txt = "".join(txt_content)

    # TF-IDF ranking to keep only the most informative words.
    tfidf_result = jieba.analyse.extract_tags(
        txt, topK=10000, withWeight=True, allowPOS=())
    tfidf_keys = [j[0] for j in tfidf_result]
    # Segment the text, keeping non-stopwords ranked by TF-IDF.
    cut_result = jieba.cut(txt)
    segments = [
        seg for seg in cut_result if seg not in stopwords and seg in tfidf_keys]
    # Word-frequency count of the kept segments.
    word_counter = Counter()
    for seg in segments:
        word_counter[seg] += 1
    # Distinct words, used for the synonym lookups below.
    word_list = list(set(word_counter.keys()))

    # Synonym-dictionary words that actually occur in this document.
    inter_set = set(synonyms_list_merge).intersection(set(word_list))
    keep_synonyms_lists = []
    for synonyms_list in synonyms_lists:
        for synonym in synonyms_list:
            if synonym in inter_set and synonyms_list not in keep_synonyms_lists:
                keep_synonyms_lists.append(synonyms_list)
    keep_synonyms_merge = [
        word for words in keep_synonyms_lists for word in words]

    drop_list = []
    # Fold each synonym's frequency onto one representative spelling.
    for seg in inter_set:
        seg_index = keep_synonyms_merge.index(seg)
        if seg_index > len(keep_synonyms_merge) / 2:
            # Word sits in the back half: scan the groups in reverse.
            iter_synonyms_lists = keep_synonyms_lists[::-1]
        else:  # otherwise scan forward
            iter_synonyms_lists = keep_synonyms_lists
        for synonym_list in iter_synonyms_lists:
            # Find the group containing this word.
            if synonym_list.count(seg):
                # NOTE(review): mutates the shared synonym group in place.
                synonym_list.remove(seg)
                # Does another word of the group occur in this document?
                for synonym in synonym_list:
                    if word_list.count(synonym):
                        # Prefer the spelling most frequent in vector1 so
                        # both documents collapse onto the same term.
                        synonym_list_withseg = (
                                "|".join(synonym_list) + "|" + seg).split("|")
                        v1_find = {s: vector1.count(s)
                                   for s in synonym_list_withseg}
                        v1_values = [i for i in v1_find.values()]
                        if any(v1_values):
                            # NOTE(review): if the chosen representative is
                            # itself seg or synonym, it is popped below --
                            # verify this loss of counts is intended.
                            word_counter[sorted(
                                v1_find, key=lambda x: v1_find[x])[-1]] += word_counter[seg]
                            word_counter[sorted(
                                v1_find, key=lambda x: v1_find[x])[-1]] += word_counter[synonym]
                            drop_list.extend([seg, synonym])
                            print(v1_find, synonym, sorted(
                                v1_find, key=lambda x: v1_find[x])[-1], seg)
                            break

                        else:
                            # Not in vector1: fold this word's count onto
                            # its in-document synonym.
                            word_counter[synonym] += word_counter[seg]
                            drop_list.append(seg)
    # Remove the words whose counts were folded away.
    for drop_word in set(drop_list):
        word_counter.pop(drop_word)

    return word_counter


def date_ch(value):
    """Parse a compact ``YYYYMMDD`` date string into a ``datetime``."""
    fmt = '%Y%m%d'
    return datetime.strptime(value, fmt)


def groupby_reports():
    """
    Group each issuer's reports by release date (reports released within a
    rolling window form one cluster) and return the grouping.

    Reads the Excel sheet at the module-level ``xlsx_path`` and matches
    rows to PDF files under ``pdf_path``; also writes the grouping to
    ``group_result.csv`` under ``root_directory``.

    Returns:
        group_of_all: list of groups; each group is a list of
        {"FIRM|IB|date": pdf_file_name} dicts.
    """
    # Load the report database.
    database = pd.read_excel(xlsx_path, date_parser=True)
    # Optionally restrict to rows whose PDF file exists.
    # database = filter_data(database)
    # NOTE(review): .str.findall returns NaN only for NON-string cells, so
    # this keeps non-string release_date values rather than "rows without
    # Chinese characters" -- confirm this is the intended filter.
    database = database[database["release_date"].str.findall(
        "[\u4e00-\u9fa5]").isna()]
    # Convert release_date to datetime...
    database["release_date"] = database["release_date"].map(str).apply(date_ch)
    # ...and use it as the index.
    database.set_index('release_date', inplace=True)

    # Sort by ticker, then release date.
    database.sort_values(by=["Ticker", "release_date"], inplace=True)

    # Collected groups across all issuers.
    group_of_all = []
    # Iterate issuer by issuer and group each issuer's reports.
    issuer_group = {i for i in list(set(database.FIRM.tolist()))}

    for temp_var in issuer_group:
        issuer_df = database[database.FIRM == temp_var]
        # An issuer needs at least three reports to form a group.
        if len(issuer_df) < 3:
            continue

        issuer = issuer_df.FIRM.tolist()[0]
        # Reports released within ROLLING_DAYS of each other are grouped.
        ROLLING_DAYS = 90
        interval_count, rolling_days_list = 0, []
        group_num, non_group_num = 0, 0
        # NOTE(review): both keys are (issuer, 0) here, so this collapses
        # to a single entry -- the non_group bucket is never used.
        interval_count_dict = {
            (issuer, group_num): [],
            (issuer, non_group_num): []
        }

        # Gaps (in days) between consecutive report dates.
        date_list = issuer_df.index.tolist()

        for index, t in enumerate(date_list[:-1]):
            rolling_interval = (date_list[index + 1] - t).days

            # Add reports within ROLLING_DAYS to the current group.
            if rolling_interval <= ROLLING_DAYS:
                interval_count_dict[(issuer, group_num)].append(
                    t) if t not in interval_count_dict[(issuer, group_num)] else 0
                interval_count_dict[(issuer, group_num)].append(
                    date_list[index + 1])
                interval_count += rolling_interval
                # Close the current group once the accumulated span exceeds
                # the window, or it already holds three or more dates.
                conditions = [
                    interval_count > ROLLING_DAYS,
                    rolling_interval <= ROLLING_DAYS and len(
                        interval_count_dict[(issuer, group_num)]) >= 3
                ]
                if any(conditions):
                    interval_count = 0
                    if len(interval_count_dict[(issuer, group_num)]) < 3:
                        # Too small to keep: discard and reuse the bucket.
                        interval_count_dict[(issuer, group_num)].clear()
                    else:
                        group_num += 1
                        rolling_days_list.append(interval_count_dict)
                        interval_count_dict = {(issuer, group_num): []}

        # Map each closed group's rows to their PDF file names; ungrouped
        # reports are (for now) ignored -- see the disabled block below.
        final_list = []
        for list_obj in rolling_days_list:
            final_list.extend(list(list_obj.values())[0])
            # Grouped reports.
            group_rst = []
            temp_group_df = issuer_df.loc[list(list_obj.values())[0], :]
            # Build a "FIRM|IB|date" lookup key for each row.
            temp_group_df["match_info"] = temp_group_df["FIRM"] + "|" + \
                                          temp_group_df["IB"].map(str) + "|" + \
                                          temp_group_df.index.map(str)
            for v in temp_group_df["match_info"].tolist():
                # Normalize the key to match the file-name formatting.
                k = re.sub("-| 00:00:00", "", v)
                temp_list = [{k: j} for j in os.listdir(pdf_path) if k.split(
                    "|")[0] in j if k.split("|")[1] in j if k.split("|")[2] in j]
                try:
                    group_rst.append(temp_list[0])
                except:
                    pass

            '''
            # 将剩余报告信息添加到列表中  -- 暂时不做
            non_group_df = issuer_df.drop(index=final_list)
            non_group_df["match_info"] = non_group_df["IB"].map(str) + "|" + non_group_df["FIRM"]
            group_rst.append(non_group_df["match_info"].tolist())
            '''
            if group_rst:
                group_of_all.append(group_rst)

    # Persist the grouping next to the other outputs.
    save_df = pd.DataFrame(group_of_all)
    save_df.to_csv(os.path.join(root_directory, "group_result.csv"))

    return group_of_all


def trans_txt(file_path, key_info):
    """
    Extract a PDF's text and append its Chinese-only content to a txt file.

    Parameters:
        file_path: path of the source PDF.
        key_info: "FIRM|IB|date" key used to name the output txt file.
    """
    nums_choice = "without_number_txt"
    save_path = os.path.join(
        root_directory, "TXT", nums_choice, fr"{'_'.join(key_info.split('|'))}.txt")
    # NOTE(review): append mode means a rerun duplicates content, and GBK
    # here vs utf-8-first reads in vectorized_data -- confirm intended.
    # The with-statement fixes the previous file-handle leak.
    with open(save_path, "a", encoding='GBK', errors="ignore") as fp:
        try:
            with pdfplumber.open(file_path) as doc:
                # Extract page by page and write to the txt file.
                for page_num, page in enumerate(doc.pages):
                    try:
                        with_number_txt = page.extract_text()
                    except Exception:
                        fp.write("write error!")
                        continue
                    # BUG FIX: re.sub's 4th positional arg is `count`, not
                    # flags; passing re.DOTALL (==16) removed only the first
                    # 16 digits / 16 non-Chinese characters per page.
                    drop_num_var = re.sub(r"\d", "", with_number_txt)
                    without_number_txt = re.sub(
                        r"[^\u4e00-\u9fa5]", "", drop_num_var)
                    fp.write(without_number_txt)
        except Exception:
            # Unreadable PDF (or a page returning None): record the failure.
            fp.write("write error!")


def calculate_cos_distance(word_counter1, word_counter2):
    """
    Compute the cosine similarity between two word-frequency counters.

    Parameters:
        word_counter1, word_counter2: Counter/dict mapping word -> frequency.
    Returns:
        2x2 symmetric ndarray whose entry [0, 1] is the cosine similarity
        (callers read ``result[0, 1]``); the diagonal is 1.0.
    """
    # BUG FIX: the key "union" was previously built by concatenating both
    # key lists, so words present in BOTH counters appeared twice and were
    # double-weighted in the similarity.  Use a true set union instead.
    keys = set(word_counter1) | set(word_counter2)
    # Frequency vectors over the shared vocabulary (0 for missing words).
    vector1 = np.array([word_counter1.get(k, 0) for k in keys], dtype=float)
    vector2 = np.array([word_counter2.get(k, 0) for k in keys], dtype=float)
    norm1 = np.linalg.norm(vector1)
    norm2 = np.linalg.norm(vector2)
    # A zero vector has no direction: report 0 similarity (matches
    # sklearn's cosine_similarity treatment of zero rows).
    if norm1 and norm2:
        sim = float(vector1 @ vector2 / (norm1 * norm2))
    else:
        sim = 0.0
    return np.array([[1.0, sim], [sim, 1.0]])


def mk_file(file_path):
    """
    Ensure the directory at ``file_path`` exists, creating it if needed.

    Parameters:
        file_path: directory path to create.
    """
    # makedirs with exist_ok also creates missing parent directories,
    # where the previous os.mkdir raised when the parent was absent.
    os.makedirs(file_path, exist_ok=True)
    print(f"{file_path}路径的文件夹已经创建好")


def prepare_work():
    """
    Prepare the working tree: create output folders, copy source PDFs,
    and load jieba user dictionaries, stop words and the synonym dictionary.

    Returns:
        (stopwords, synonyms_lists): the stop-word list and the list of
        synonym groups (each group a list of interchangeable words).
    """
    # Create the root output directory and the TXT subfolders.
    mk_file(root_directory)
    txt_path = os.path.join(root_directory, "TXT")

    mk_file(txt_path)
    mk_file(os.path.join(txt_path, "with_number_txt"))
    mk_file(os.path.join(txt_path, "without_number_txt"))

    # Copy the PDFs into the working "File" folder; best-effort -- a file
    # that cannot be copied is simply skipped.
    pdf_file_path = os.path.join(root_directory, "File")
    mk_file(pdf_file_path)
    for file in os.listdir(pdf_path):
        old_path = os.path.join(pdf_path, file)
        new_path = os.path.join(pdf_file_path, file)
        try:
            shutil.copy(old_path, new_path)
        except OSError:
            pass

    # Load every user dictionary found in the corpus folder into jieba.
    # (os.walk yields (dirpath, dirnames, filenames); the original names
    # "files, corpus" mislabeled the tuple.)
    for root, _dirs, filenames in os.walk(r"../jieba语料库/分词词库"):
        for user_dict in filenames:
            jieba.load_userdict(os.path.join(root, user_dict))

    # Stop-word list, extended with a few layout characters.
    with open(r"../jieba语料库/stopwords.txt", 'r', encoding='utf8') as f:
        content = f.readlines()
    stopwords = [w.strip() for w in content]
    stopwords.extend([' ', '\r\n', '）', '/', '（'])

    # Synonym dictionary (HIT IR-Lab synonym forest, extended edition).
    # The with-statement fixes the previous file-handle leak.
    sim_directory_path = r"../jieba语料库/哈工大社会计算与信息检索研究中心同义词词林扩展版.txt"
    with codecs.open(sim_directory_path, "r", encoding="gbk") as f:
        sim_words_list = f.read().split("\r\n")
    # Keep only true synonym groups (lines marked with '='), dropping the
    # related-word and antonym groups.
    synonyms_lists = list(map(lambda x: re.findall(
        "= (.*)", x)[0].split(" "), filter(lambda x: re.findall("=", x), sim_words_list)))

    return stopwords, synonyms_lists


def main(nums_in="without_number_txt"):
    """
    Entry point: read the report grouping, vectorize every report and
    compute pairwise cosine similarity inside each group, then write the
    results to an Excel file under ``root_directory``.

    Parameters:
        nums_in:
            "with_number_txt": use the txt files that keep digits
            "without_number_txt": use the txt files with digits stripped
    """
    start_time = time.time()  # wall-clock start for progress logging
    # Create folders and load jieba dictionaries / stop words / synonyms.
    stopwords, synonyms_lists = prepare_work()
    # Read the pre-computed report grouping.
    print("开始读入报告分组结果......")
    group_of_all = []
    # try:
        # try reading the locally cached grouping result
        # group_df1 = pd.read_csv(os.path.join(
        #     root_directory, "group_result.csv"))
    group_df = pd.read_excel(os.path.join(
        root_directory, "2018_2021all_deep_reports_2022Oct21.xls"))
    # except FileNotFoundError:
    #     print("本地不存在分组报告结果，需要重新进行分组，正在进行分组......")
    # finally:
        # iter_rows = group_df1.values.tolist()
        # # drop empty cells and parse the stored rows
        # for row in iter_rows:
        #     eval_rst = []
        #     for i in row[1:]:
        #         if not (i is np.nan):
        #             eval_rst.append(ast.literal_eval(i))
        #     group_of_all.append(eval_rst)
        # target shape: {'FIRM'|'ib'|'date': file_name}
    # Strip any directory prefix from the file-name column.
    # NOTE(review): the 4th positional arg of re.sub is `count`, not flags
    # (re.DOTALL == 16) -- harmless here since at most one match exists.
    group_df['x'] = pd.Series([i for i in map(lambda x: re.sub(
        '(.*/)', '', x, re.DOTALL), group_df['x'].tolist())])
    # Extract the leading date segment of each file name.
    group_df['date'] = pd.Series([i for i in map(lambda x: re.findall(
        '(.*?)-', x, re.DOTALL)[0], group_df['x'].tolist())])
    # Build a {"FIRM|IB|date": file_name} dict for every row.
    group_df["info"] = group_df.loc[:, ['x', 'FIRM', 'IB', 'date']].apply(
        lambda x: {'|'.join([x.tolist()[1], x.tolist()[2], x.tolist()[3]]): x.tolist()[0]}, axis=1)

    # One group per cluster id.
    # NOTE(review): the two independent set() iterations here and below are
    # assumed to yield the same order so cluster_id_list lines up with
    # group_of_all -- fragile; consider deriving both from one sorted list.
    for ID in set(group_df['clusterID']):
        group_of_all.append(list(group_df[group_df['clusterID'] == ID].loc[:, 'info']))
    print("PDF报告分组完成!!!")
    cluster_id_list = list(set(group_df['clusterID'].tolist()))

    print("开始计算报告相似度对比......")
    group_result_list, group_len = [], len(group_of_all)
    # Iterate over every cluster of reports.
    for group_nums, group in zip(cluster_id_list, group_of_all):
        # Resolve each report's PDF path and its "FIRM|IB|date" key.
        file_path_list = [os.path.join(
            root_directory, "File", list(file.values())[0]) for file in group]
        key_info_list = [list(file.keys())[0] for file in group]

        # Optional first pass extracting PDFs to txt files (disabled):
        # for file_path, key_info in zip(file_path_list, key_info_list):
        #     txt_path = os.path.join(
        #         root_directory, "TXT", nums_in, fr"{'_'.join(key_info.split('|'))}.txt")
        #     if os.path.exists(txt_path) is False:
        #         # txt not found: extract this PDF and write it out
        #         trans_txt(file_path, key_info)
        #         print(os.path.basename(txt_path),
        #           f"写入完成！目前写入进度为{group_nums}/{group_len}组")
        # continue

        result_list = []
        # Compare every unordered pair of reports within the group.
        for index, info in enumerate(zip(file_path_list[:-1], key_info_list[:-1])):
            file_path1, key_info1 = info
            for file_path2, key_info2 in zip(file_path_list[index + 1:], key_info_list[index + 1:]):
                if file_path1 != file_path2:
                    # vector2 is built knowing vector1's words so synonym
                    # frequencies collapse onto the same spellings.
                    vector1 = vectorized_data(
                        key_info1, synonyms_lists, stopwords, nums_in=nums_in)
                    vector2 = vectorized_data(key_info2, synonyms_lists, stopwords, list(
                        vector1.keys()), nums_in=nums_in)
                    # Cosine similarity between the two frequency vectors.
                    print(
                        f"开始对比：{os.path.basename(file_path1).strip('.pdf')}&{os.path.basename(file_path2).strip('.pdf')}......")
                    count_time = time.time()
                    try:
                        calc_result = calculate_cos_distance(
                            vector1, vector2)[0, 1]
                    except:
                        calc_result = 0
                    end_time = time.time()
                    # NOTE(review): `group_nums + 1` assumes the cluster ids
                    # are numeric -- confirm clusterID is an integer column.
                    print(
                        f"对比相似度为: {calc_result:.4f}, 本次计算耗时{end_time - count_time:.4f}s，总耗时{end_time - start_time:.4f}s, 进行到{group_nums + 1}/{group_len}组")
                    print("- " * 30)
                    # Row: [cluster, FIRM, IB1, IB2, similarity, path1, path2]
                    result_list.append([group_nums, key_info1.split("|")[0], key_info1.split("|")[
                        1], key_info2.split("|")[1], calc_result, file_path1, file_path2])
        group_result_list.append(result_list)

    # Flatten the per-group rows into one DataFrame.
    result_df = pd.DataFrame()
    for group in group_result_list:
        result_df = pd.concat(
            [result_df, pd.DataFrame(group)], ignore_index=False)
    result_df.columns = ["groups_nums", "FIRM",
                         "IB1", "IB2", "similarity_ratio", "file_path1", "file_path2"]

    # Write the results to Excel.
    print("开始写入到本地......")
    result_path = os.path.join(root_directory, f"{nums_in}_result.xlsx")
    result_df.to_excel(result_path)
    print("写入完成！！！")


if __name__ == '__main__':
    # TODO: the only paths that need editing -- point pdf_path at the
    # folder holding the source PDF reports.
    pdf_path = r"E:\Data\File"
    # Working directory where TXT/, File/ and the result files are written.
    root_directory = r"E:\Data"
    # Report database spreadsheet used by groupby_reports/filter_data.
    xlsx_path = os.path.join(root_directory, "reportname_8288.xlsx")
    main()
