import numpy as np
import time
import xlrd
import click
import os
import xlrd
import logging
import re
import jieba
from gensim.corpora import Dictionary
from harvesttext import HarvestText
from harvesttext.resources import get_baidu_stopwords
from tqdm import tqdm
from pandas.core.frame import DataFrame
import seaborn as sns
import pandas as pd


# Shared HarvestText instance, used by original_text_load for text cleaning.
ht = HarvestText()


# Keep: CJK ideographs, ASCII letters/digits, and common CJK/ASCII punctuation.
# Compiled once at module level so per-document calls skip the cache lookup.
_KEEP_PATTERN = re.compile('[^\u4e00-\u9fa5a-zA-Z0-9,.，。！？!?()（）@《》]')


def clean(text):
    """
    Remove every character outside the allowed set (Chinese ideographs,
    ASCII letters, digits and common punctuation) from *text*.

    :param text: raw input string
    :return: filtered string
    """
    # BUG FIX: the original class was [^\u4e00-\u9fa5^a-z^A-Z^0-9...] — the
    # inner '^' are literal inside a character class, so '^' itself was
    # accidentally treated as an allowed character and never stripped.
    return _KEEP_PATTERN.sub('', text)


def original_text_load(path, xls_column=2):
    """
    Load and aggregate documents from every .xls file in a directory
    (content exported from the meiya system), then deduplicate, clean
    and tokenize them.

    # parameter

    path : `str`
        Directory containing the .xls files.
    xls_column : `int`
        Zero-based index of the worksheet column holding the text.

    # return

    cut_docs : `List[str]`
        One space-joined string of tokens per surviving document
        (format expected by get_set_key).
    """
    file_list = os.listdir(path)
    # Drop hidden files and anything that is not an .xls workbook.
    file_list = [i for i in file_list if not i.startswith('.') and i.endswith('.xls')]
    docs_list = []

    logging.info(f'开始加载数据 {path}')
    print('开始加载数据\n')
    for xls_file in tqdm(file_list):
        work_book = xlrd.open_workbook(os.path.join(path, xls_file))
        for sheet in work_book.sheets():  # every worksheet, not just the first
            col_data = sheet.col_values(xls_column)
            col_data.pop(0)  # drop the header row
            docs_list.extend(col_data)

    seen = set()
    clean_doc_list = []
    logging.info('清洗数据 开始')
    print('开始 清洗数据\n')
    for doc in tqdm(docs_list):
        # Deduplicate on the first 10 cleaned characters; the original
        # computed this hash twice per document.
        key = hash(clean(doc)[:10])
        if key in seen:
            continue
        seen.add(key)
        tmp_text = ht.clean_text(doc).replace('...', '').replace('#', '')
        if len(tmp_text) <= 7:  # discard documents of 7 characters or fewer
            continue

        clean_doc_list.append(tmp_text)

    logging.info(f'全部 doc_length: {len(docs_list)}')
    logging.info(f'有效 doc_length: {len(clean_doc_list)}')

    # set: O(1) membership per token; the original list lookup was O(n).
    stop_word = set(get_baidu_stopwords())
    non_chinese = re.compile('[^\u4e00-\u9fa5]+')  # hoisted out of the loop
    cut_docs = []
    for doc in tqdm(clean_doc_list):  # tokenize
        doc = non_chinese.sub(' ', doc)
        cut_doc = [i for i in jieba.cut_for_search(doc)
                   if i not in stop_word and i != ' ' and len(i) >= 2]  # drop stopwords
        cut_docs.append(' '.join(cut_doc))  # space-joined for get_set_key

    return cut_docs


def get_set_key(data, threshold=30, above=0.5, hard_conctrl=0):
    """
    Select the keywords whose corpus frequency is >= ``threshold`` and
    whose document-frequency ratio is <= ``above``; they become the
    header row/column of the co-occurrence matrix.

    :param data: list of space-joined token strings, one per document
    :param threshold: minimum total occurrence count across the corpus
    :param above: maximum fraction of documents a word may appear in
    :param hard_conctrl: if truthy, keep only the top-N keywords
    :return: keywords sorted by frequency, descending
    """
    from collections import Counter

    all_key = ' '.join(data)  # concatenate the whole corpus
    key_list = [it.strip() for it in all_key.strip().split(' ')]
    # Counter is a single O(n) pass; the original called list.count() once
    # per distinct key, which is O(n^2) overall.
    dic = Counter(key_list)
    keys = set(key_list)
    dict_all = Dictionary([i.split(' ') for i in data])
    # Fraction of documents each word occurs in (document frequency).
    proportion = {k: dict_all.dfs[dict_all.token2id[k]] / len(data) for k in keys}

    wf = {k: v for k, v in dic.items() if k != '' and v >= threshold and proportion[k] <= above}
    set_key_list = [k for k, _ in sorted(wf.items(), key=lambda item: item[1], reverse=True)]

    if hard_conctrl:
        set_key_list = set_key_list[:hard_conctrl]
    return set_key_list


def format_data(data, set_key_list):
    """
    Reduce each document (a space-joined token string) to the list of
    keywords it contains, deduplicated within the document.

    :param data: list of space-joined token strings
    :param set_key_list: keywords to keep
    :return: list of per-document keyword lists (no duplicates, no '')
    """
    # Build the set once: O(1) membership vs O(k) per token against the list.
    key_set = set(set_key_list)
    formated_data = []
    for ech in data:
        # Set comprehension dedupes and filters in one pass; '' is excluded
        # exactly as the original filter() did.
        kept = {w for w in ech.split(' ') if w in key_set and w != ''}
        formated_data.append(list(kept))
    return formated_data


def build_matirx(set_key_list):
    """
    Create an (n+1) x (n+1) zero matrix for n keywords; the extra row and
    column will hold the keyword labels (filled by init_matrix).

    :param set_key_list: list of keywords
    :return: square nested list of zeros
    """
    edge = len(set_key_list) + 1
    # A fresh inner list per row keeps the rows independently mutable.
    return [[0] * edge for _ in range(edge)]


def init_matrix(matrix, set_key_list):
    """
    Write the keyword labels into the first row and first column of the
    zero matrix produced by build_matirx.

    :param matrix: square matrix of size len(set_key_list) + 1
    :param set_key_list: keyword labels
    :return: matrix with labels in row 0 and column 0
    """
    # Plain slice assignment; the original wrapped the labels in np.array,
    # which only changed the element type to numpy.str_ for no benefit.
    matrix[0][1:] = list(set_key_list)
    # Transpose so the same row-0 assignment fills what becomes column 0.
    matrix = [list(row) for row in zip(*matrix)]
    matrix[0][1:] = list(set_key_list)
    return matrix


def count_matrix(matrix, formated_data):
    """
    Fill the labelled matrix with pairwise co-occurrence counts: cell
    (i, j) is the number of documents containing both keyword i and
    keyword j; the diagonal is 0. The matrix is symmetric.

    :param matrix: matrix from init_matrix (labels in row 0 and column 0)
    :param formated_data: per-document keyword lists from format_data
    :return: the filled matrix
    """
    keywords = matrix[0][1:]
    # Map each keyword to the SET of document indices that contain it.
    # Building the sets once here avoids the original's re-creation of
    # both sets for every keyword pair (O(k^2 * d) -> O(k * d)).
    appeardict = {w: {idx for idx, line in enumerate(formated_data) if w in line}
                  for w in keywords}
    size = len(matrix)
    for row in range(1, size):
        for col in range(row, size):  # upper triangle only, then mirror
            row_word = matrix[0][row]
            col_word = matrix[col][0]
            if row_word == col_word:
                count = 0  # diagonal: a word's co-occurrence with itself is 0
            else:
                count = len(appeardict[row_word] & appeardict[col_word])
            matrix[col][row] = count
            matrix[row][col] = count  # symmetric fill
    return matrix


def heatmap_print(matrix, save_name='test'):
    """
    Render the co-occurrence matrix as a bubble heatmap and save it as a
    150-dpi JPEG named ``<save_name>.jpg``.

    :param save_name: output file name, without extension
    :param matrix: labelled co-occurrence matrix (labels in row 0 / column 0)
    """
    size = len(matrix)
    # Flatten the labelled matrix into long-form (x, y, count) triples
    # for seaborn's relplot.
    cells = [(matrix[i][0], matrix[0][j], matrix[i][j])
             for i in range(1, size) for j in range(1, size)]
    df = DataFrame({'x': [c[0] for c in cells],
                    'y': [c[1] for c in cells],
                    'c': [c[2] for c in cells]})

    sns.set_theme(style="whitegrid")
    # KaiTi renders the Chinese labels; keep the minus-sign fix with it.
    sns.set(font='KaiTi', rc={'axes.unicode_minus': False})
    g = sns.relplot(
        data=df,
        x="x", y="y", hue="c", size="c",
        palette="rocket_r", hue_norm=(0, 800), edgecolor=".7",
        height=10, sizes=(100, 600), size_norm=(0, 800),
    )

    # Final cosmetic tweaks before saving.
    g.set(xlabel="", ylabel="", aspect="equal")
    g.despine(left=True, bottom=True)
    g.ax.margins(.02)
    for tick_label in g.ax.get_xticklabels():
        tick_label.set_rotation(90)
    for handle in g.legend.legendHandles:
        handle.set_edgecolor(".7")
    g.savefig(f'{save_name}.jpg', dpi=150)


def graph_data_output(matrix, path='graph_data'):
    """
    Dump the co-occurrence matrix to ``<path>.csv``, encoded as UTF-8
    with BOM so Excel opens the Chinese labels correctly.

    :param matrix: labelled co-occurrence matrix
    :param path: output file name, without extension
    """
    pd.DataFrame(matrix).to_csv(f'{path}.csv', encoding='utf_8_sig')


@click.command()
@click.argument('xls_dataset_route', nargs=-1)
@click.option('--xls_column', '-c', default=2)
@click.option('--threshold', '-t', default=30)
@click.option('--above', '-a', default=0.5)
# BUG FIX: was '-savefig_name' (single-dash long option, inconsistent with
# every other option here).
@click.option('--savefig_name', '-s', default='test')
@click.option('--save_csv_name', '-n', default='graph_data')
# BUG FIX: was ('-hard', '-c') — the '-c' short flag collided with
# --xls_column's '-c'; the short flag is dropped.
@click.option('--hard', default=35)
def main(xls_dataset_route, xls_column, threshold, above, savefig_name, save_csv_name, hard):
    """
    Build, plot and export a keyword co-occurrence matrix from a folder
    of .xls files.

    :param xls_dataset_route: folder containing the .xls corpus
    :param xls_column: worksheet column index holding the text
    :param threshold: minimum keyword frequency (see get_set_key)
    :param above: maximum document-frequency ratio (see get_set_key)
    :param savefig_name: heatmap file name, without extension
    :param save_csv_name: CSV file name, without extension
    :param hard: hard cap on the number of keywords
    :return: None
    """
    keyword_path = ''.join(xls_dataset_route)
    data = original_text_load(keyword_path, xls_column=xls_column)
    set_key_list = get_set_key(data, threshold=threshold, above=above, hard_conctrl=int(hard))
    print(len(set_key_list))
    formated_data = format_data(data, set_key_list)
    matrix = build_matirx(set_key_list)
    matrix = init_matrix(matrix, set_key_list)
    result_matrix = count_matrix(matrix, formated_data)
    heatmap_print(result_matrix, savefig_name)
    graph_data_output(result_matrix, path=save_csv_name)


if __name__ == '__main__':
    main()  # click parses sys.argv and dispatches to main()
