import codecs
import numpy as np
import os
import re

import pandas as pd
from bitmap import bitmap
from gensim import corpora, models, similarities

import gen_dfff as gd
import gen_participle_jieba as jb

# Matches a leading 【...】 marker at the start of a text; used below as the
# account's message "signature" when grouping rows.
pattern_sign = re.compile(r'^【.+?】')


def load_csv(resdir, fn):
    """Read a CSV of user texts and yield (doc_list, row_list) batches.

    Each yielded pair holds jieba-tokenized documents and their matching
    DataFrame rows.  Consecutive rows sharing the same eprId:uid:signature
    key stay in one batch; a batch is flushed only when the key changes and
    the batch already exceeds 1000 docs, so one account never spans batches.
    Rows whose 'count' column is > 1 are appended twice (double weight).

    :param resdir: directory containing the input file
    :param fn: CSV file name with columns
               eprId, uid, count, auditStatus, md5content, text
    :return: generator of (all_doc_list, row_list) tuples
    """
    path = resdir + os.sep + fn
    # pandas >= 2.0 removed the error_bad_lines kwarg; prefer the modern
    # on_bad_lines='skip' and fall back for old pandas versions.
    try:
        train_data = pd.read_csv(path, header=None, on_bad_lines='skip',
                                 skiprows=1)
    except TypeError:
        train_data = pd.read_csv(path, header=None, error_bad_lines=False,
                                 skiprows=1)
    train_data.columns = ['eprId', 'uid', 'count', 'auditStatus', 'md5content', 'text']
    count_doc = 0
    all_doc_list = []
    row_list = []
    lastStr = ""
    for index, row in train_data.iterrows():
        rowstr = row['text']
        # skip rows whose text is missing / non-string (e.g. NaN)
        if not (isinstance(rowstr, str) and len(rowstr) > 0):
            print("错误" + str(rowstr))
            continue
        rowstr = rowstr.replace("\t", "").replace("\n", "")
        eprId = str(row['eprId'])
        uid = str(row['uid'])
        count = row['count']
        # jieba word segmentation
        outline = jb.participle(rowstr)

        count_doc = count_doc + 1

        # leading 【...】 marker is treated as the account signature
        search = pattern_sign.search(rowstr)
        sign = search.group() if search else ""
        str2 = eprId + ":" + uid + ":" + sign

        # Same account + signature as the previous row -> same batch.
        if str2 == lastStr:
            all_doc_list.append(outline)
            row_list.append(row)
            # duplicated content is weighted double
            if count > 1:
                all_doc_list.append(outline)
                row_list.append(row)
        else:
            # flush only on a key change so a group is never split
            if len(all_doc_list) > 1000:
                print("count_doc:" + str(count_doc))
                yield all_doc_list, row_list
                all_doc_list = []
                row_list = []

            lastStr = str2
            all_doc_list.append(outline)
            row_list.append(row)

    # final partial batch
    if len(all_doc_list) > 0:
        yield all_doc_list, row_list


def load_txt(resdir, fn):
    """
    Second-pass template processing: read a tab-separated file and yield
    (tokenized_docs, rows) batches grouped by account + signature.
    :param resdir: directory containing the input file
    :param fn: input file name with columns eprId, uid, auditStatus, md5content, text
    :return: generator of (doc_list, row_list) tuples
    """
    frame = pd.read_table(resdir + os.sep + fn, header=None, error_bad_lines=False,
                          skiprows=0)
    frame.columns = ['eprId', 'uid', 'auditStatus', 'md5content', 'text']
    frame.replace(np.nan, 0, inplace=True)
    frame.replace(np.inf, 0, inplace=True)
    # in production auditStatus may be parsed as float; force it to int
    frame['auditStatus'] = frame['auditStatus'].astype(np.int16)

    processed = 0
    batch_docs = []
    batch_rows = []
    prev_key = ""
    for _, row in frame.iterrows():
        text = row['text']
        # skip rows whose text is missing / non-string
        if not (isinstance(text, str) and len(text) > 0):
            print("错误" + str(text))
            continue
        text = text.replace("\t", "").replace("\n", "")
        # jieba word segmentation
        tokens = jb.participle(text)
        processed += 1

        # leading 【...】 marker is treated as the account signature
        match = pattern_sign.search(text)
        signature = match.group() if match else ""
        key = str(row['eprId']) + ":" + str(row['uid']) + ":" + signature

        # A new account/signature key may flush the accumulated batch;
        # flushing only on key changes keeps a group inside one batch.
        if key != prev_key:
            if len(batch_docs) > 1000:
                yield batch_docs, batch_rows
                batch_docs = []
                batch_rows = []
            prev_key = key
        batch_docs.append(tokens)
        batch_rows.append(row)

    # final partial batch
    if batch_docs:
        yield batch_docs, batch_rows


def tran(resdir, fn, save_fn):
    """Cluster near-duplicate texts with TF-IDF similarity and write templates.

    For each batch yielded by load_csv, builds a gensim dictionary and
    TF-IDF similarity index, greedily groups rows whose cosine similarity
    exceeds 0.5, derives a shared template via gd.diff_list, and writes one
    tab-separated line (eprId, uid, auditStatus, md5 list, template) per
    cluster to save_fn.  Clusters whose template is falsy are not written.

    :param resdir: directory containing input fn and output save_fn
    :param fn: input CSV file name, passed to load_csv
    :param save_fn: output file name for the template lines
    """
    with  codecs.open(resdir + os.sep + save_fn, 'w', 'utf-8') as rs_file:
        for all_doc_list, row_list in load_csv(resdir, fn):
            dictionary = corpora.Dictionary(all_doc_list)
            # print(dictionary.keys())

            # dictionary.token2id

            # Build the corpus: one bag-of-words vector per document.
            corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]

            # TF-IDF model over the bag-of-words corpus.
            tfidf = models.TfidfModel(corpus)
            # lsi = models.LsiModel(corpus)
            # corpus_lsi = lsi[corpus_tfidf]
            # tfidf = models.lsimodel(corpus)
            # (Alternative left disabled: LSI model with 300 topics.)
            # lsi = models.LsiModel(tfidf, id2word=dictionary, num_topics=300)

            # Sparse similarity index used to compare each probe document
            # against every document in the batch.
            index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dictionary.keys()))
            # index = similarities.MatrixSimilarity(lsi[tfidf[corpus]])

            # Bitmap marks rows already consumed by an earlier cluster.
            bm = bitmap.BitMap(all_doc_list.__len__())
            for i in range(all_doc_list.__len__()):
                # Skip rows already matched into a previous cluster.
                if bm.test(i): continue
                doc_test_list = all_doc_list[i]
                cur_row = row_list[i]
                # print(doc_test_list)
                # Convert the probe document to a bag-of-words vector too.
                doc_test_vec = dictionary.doc2bow(doc_test_list)
                sim = index[tfidf[doc_test_vec]]

                # print(sim)
                # Sort candidate rows by descending similarity.
                rs = sorted(enumerate(sim), key=lambda item: -item[1])

                print("------rs start")
                # rs_file.write("------rs start"+ "\n")

                # print(rs)

                arr_list = [arr for arr in rs if arr[1] > 0.5]  # keep only sufficiently similar rows
                rs_list = []
                md5_list = []  # keep at most 5 md5 values
                for idx, value in arr_list:
                    row = row_list[idx]
                    bm.set(idx)  # remember this row was consumed
                    md5content = row['md5content']
                    rowstr = row['text']
                    rowstr = rowstr.replace("\t", "").replace("\n", "")
                    print(rowstr)
                    # rs_file.write(rowstr + "\n")
                    rs_list.append(rowstr)
                    if len(md5_list) < 5: md5_list.append(md5content)
                # Derive the template common to the clustered texts.
                template = gd.diff_list(rs_list)
                print("------template:")
                if len(md5_list) == 0: md5_list.append(cur_row['md5content'])
                # rs_file.write("------template"+ "\n")
                str_template_ = str(cur_row["eprId"]) + "\t" + str(cur_row["uid"]) + "\t" + str(
                    cur_row["auditStatus"]) + "\t" + ";".join(md5_list) + "\t" + str(template)
                str_template_ = re.sub(r"\n+", "***", str_template_)  # collapse newlines in the output line
                print(str_template_)
                # NOTE(review): a falsy template is printed but deliberately not written.
                if template:
                    rs_file.write(str_template_ + "\n")
                print("------rs end")
                # rs_file.write("------rs end"+ "\n")
                rs_file.flush()
    # print(bm.tostring())
    # rs_file.close()


def cut_md5(md5_str):
    """Truncate a ';'-joined md5 list to its first five entries."""
    kept = []
    for piece in md5_str.split(";"):
        if len(kept) == 5:
            break
        kept.append(piece)
    return ";".join(kept)


def tran2(resdir, fn, save_fn):
    """Second-pass clustering over the first-pass template file.

    Same TF-IDF clustering as tran, but reads batches from load_txt,
    trims the md5 list with cut_md5, and always writes one line per
    cluster, falling back to the row text when no template was derived.

    :param resdir: directory containing input fn and output save_fn
    :param fn: input tab-separated file name, passed to load_txt
    :param save_fn: output file name for the template lines
    """
    with codecs.open(resdir + os.sep + save_fn, 'w', 'utf-8') as rs_file:
        for all_doc_list, row_list in load_txt(resdir, fn):
            dictionary = corpora.Dictionary(all_doc_list)
            # print(dictionary.keys())

            # dictionary.token2id

            # Build the corpus: one bag-of-words vector per document.
            corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]

            # TF-IDF model over the bag-of-words corpus.
            tfidf = models.TfidfModel(corpus)
            # lsi = models.LsiModel(corpus)
            # corpus_lsi = lsi[corpus_tfidf]
            # tfidf = models.lsimodel(corpus)
            # (Alternative left disabled: LSI model with 300 topics.)
            # lsi = models.LsiModel(tfidf, id2word=dictionary, num_topics=300)

            # Sparse similarity index used to compare each probe document
            # against every document in the batch.
            index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dictionary.keys()))
            # index = similarities.MatrixSimilarity(lsi[tfidf[corpus]])

            # Bitmap marks rows already consumed by an earlier cluster.
            bm = bitmap.BitMap(all_doc_list.__len__())
            for i in range(all_doc_list.__len__()):
                # Skip rows already matched into a previous cluster.
                if bm.test(i): continue
                doc_test_list = all_doc_list[i]
                cur_row = row_list[i]
                # print(doc_test_list)
                # Convert the probe document to a bag-of-words vector too.
                doc_test_vec = dictionary.doc2bow(doc_test_list)
                sim = index[tfidf[doc_test_vec]]

                # print(sim)
                # Sort candidate rows by descending similarity.
                rs = sorted(enumerate(sim), key=lambda item: -item[1])

                print("------rs start")
                # rs_file.write("------rs start"+ "\n")
                #
                # print(rs)

                arr_list = [arr for arr in rs if arr[1] > 0.5]  # keep only sufficiently similar rows
                rs_list = []
                md5_list = []  # keep at most 5 md5 values
                for idx, value in arr_list:
                    row = row_list[idx]
                    bm.set(idx)  # remember this row was consumed
                    md5content = row['md5content']
                    rowstr = row['text']
                    rowstr = rowstr.replace("\t", "").replace("\n", "")
                    print(rowstr)
                    # rs_file.write(rowstr + "\n")
                    rs_list.append(rowstr)
                    if len(md5_list) < 5: md5_list.append(md5content)
                # Derive the template common to the clustered texts.
                template = gd.diff_list(rs_list)
                print("------template:")
                # rs_file.write("------template"+ "\n")
                if len(md5_list) == 0: md5_list.append(cur_row['md5content'])

                # Fall back to the last matched row's text when no template was
                # derived.  NOTE(review): relies on arr_list being non-empty so
                # that rowstr is bound (the probe row should always match itself
                # with similarity 1.0 — TODO confirm).
                str_template_ = str(template) if template else rowstr
                str_template_ = str(cur_row["eprId"]) + "\t" + str(cur_row["uid"]) + "\t" + str(
                    cur_row["auditStatus"]) + "\t" + cut_md5(";".join(md5_list)) + "\t" + str_template_
                print(str_template_)
                rs_file.write(str_template_ + "\n")
                print("------rs end")
                # rs_file.write("------rs end"+ "\n")
                rs_file.flush()
            # print(bm.tostring())
    # rs_file.close()


def word_replace(resdir, fn, save_fn):
    """Deduplicate and sort the lines of a word-list file.

    Reads resdir/fn, strips each line, removes duplicates, and writes the
    sorted unique lines to resdir/save_fn (echoing each to stdout).
    """
    with codecs.open(resdir + os.sep + fn, 'r', 'utf8') as src:
        unique_words = {raw.strip() for raw in src}
    with codecs.open(resdir + os.sep + save_fn, 'w', 'utf8') as dst:
        for word in sorted(unique_words):
            print(word)
            dst.write(word + '\n')


if __name__ == '__main__':
    # Earlier pipeline stages, left for reference:
    # tran(".", "ctx.csv")
    # tran(".", "test.csv","rs2.txt")
    # word_replace(".", "rs2.txt", "rs2_sort.txt")
    # Second pass: rebuild templates from the sorted first-pass output.
    tran2(".", "rs2_sort.txt", "rs3.txt")
    print(cut_md5("asdf;asdf;asdf;as;1123;12;123"))
    print(cut_md5("asdf;asdf"))
