import logging
import os
import re
from collections import Counter

import click
import jieba
import numpy as np
import pandas as pd
import xlrd
from communities.algorithms import bron_kerbosch
from communities.algorithms import girvan_newman
from communities.algorithms import hierarchical_clustering
from communities.algorithms import louvain_method
from gensim.corpora import Dictionary
from harvesttext import HarvestText
from harvesttext.resources import get_baidu_stopwords
from tqdm import tqdm


# Module-level HarvestText instance, shared by the text-cleaning steps below.
ht = HarvestText()


def clean(text):
    """
    Strip invalid characters from *text*.

    Keeps CJK ideographs, ASCII letters/digits and common Chinese/ASCII
    punctuation; every other character is removed.

    :param text: raw input string
    :return: cleaned string
    """
    # Fix: the original class was written "[^\u4e00-\u9fa5^a-z^A-Z^0-9...]".
    # Inside a character class only the FIRST '^' negates; the later ones were
    # literal '^' characters, so stray '^' survived cleaning. Removed here.
    cop = re.compile(r"[^\u4e00-\u9fa5a-zA-Z0-9,.，。！？!?()（）@《》]")
    return cop.sub('', text)


def original_text_load(path, xls_column=2):
    """
    Load and aggregate several .xls files (exported from the meiya system),
    deduplicate and clean the documents, then tokenize them.

    # parameter

    path : `str`
        directory that holds the .xls files

    xls_column : `int`
        zero-based column index that holds the document text

    # return

    cut_docs : `List[str]`
        one space-joined, tokenized document per entry

    """
    # Drop hidden files and anything that is not an .xls workbook.
    file_list = [i for i in os.listdir(path)
                 if not i.startswith('.') and i.endswith('.xls')]

    docs_list = []
    logging.info(f'开始加载数据 {path}')
    print('开始加载数据\n')
    for xls_file in tqdm(file_list):
        work_book = xlrd.open_workbook(os.path.join(path, xls_file))
        for sheet in work_book.sheets():  # every sheet in the workbook
            col_data = sheet.col_values(xls_column)
            col_data.pop(0)  # drop the header row
            docs_list.extend(col_data)  # extend avoids quadratic re-allocation

    seen = set()
    clean_doc_list = []
    logging.info('清洗数据 开始')
    print('开始 清洗数据\n')
    for doc in tqdm(docs_list):
        # Dedupe on the hash of the first 10 cleaned characters.
        fingerprint = hash(clean(doc)[:10])
        if fingerprint in seen:
            continue
        seen.add(fingerprint)
        tmp_text = ht.clean_text(doc).replace('...', '').replace('#', '')
        if len(tmp_text) <= 7:  # discard documents of 7 chars or fewer
            continue
        clean_doc_list.append(tmp_text)

    logging.info(f'全部 doc_length: {len(docs_list)}')
    logging.info(f'有效 doc_length: {len(clean_doc_list)}')

    # Set membership is O(1); the original list made the filter O(n) per token.
    stop_words = set(get_baidu_stopwords())
    cut_docs = []
    for doc in tqdm(clean_doc_list):  # tokenize each cleaned document
        doc = re.sub('[^\u4e00-\u9fa5]+', ' ', doc)
        tokens = [t for t in jieba.cut_for_search(doc)
                  if t not in stop_words and t != ' ' and len(t) >= 2]
        cut_docs.append(' '.join(tokens))  # space-joined to cooperate with get_set_key

    return cut_docs


def get_set_key(data, threshold=30, above=0.5, hard_conctrl=0):
    """
    Select the keywords used as header row/column of the co-occurrence matrix.

    A token is kept when its total frequency is >= ``threshold`` and its
    document-frequency ratio is <= ``above``. The result is sorted by total
    frequency, descending.

    :param data: list of space-joined token strings (output of original_text_load)
    :param threshold: minimum total frequency for a token to be kept
    :param above: maximum fraction of documents a token may appear in
    :param hard_conctrl: if truthy, truncate the result to this many keywords
    :return: list of selected keywords, most frequent first
    """
    key_list = [it.strip() for it in ' '.join(data).strip().split(' ')]
    # Counter is O(n); the original called key_list.count(k) per distinct key,
    # which is O(n * m) and very slow on large corpora.
    dic = Counter(key_list)
    dict_all = Dictionary([i.split(' ') for i in data])
    # Fraction of documents each token occurs in (gensim document frequencies).
    proportion = {k: dict_all.dfs[dict_all.token2id[k]] / len(data) for k in dic}

    wf = {k: v for k, v in dic.items()
          if k != '' and v >= threshold and proportion[k] <= above}
    set_key_list = [k for k, _ in sorted(wf.items(), key=lambda item: item[1], reverse=True)]

    if hard_conctrl:
        set_key_list = set_key_list[:hard_conctrl]
    return set_key_list


def format_data(data, set_key_list):
    """
    Reduce each document to the unique keywords it contains.

    :param data: list of space-joined token strings
    :param set_key_list: keywords selected by get_set_key
    :return: list of per-document keyword lists (deduplicated, '' removed)
    """
    # Set membership is O(1); the original tested `e in set_key_list` against
    # a list, making the whole pass O(docs * tokens * keywords).
    key_set = set(set_key_list)
    formated_data = []
    for ech in data:
        kept = {tok for tok in ech.split(' ') if tok != '' and tok in key_set}
        formated_data.append(list(kept))
    return formated_data


def build_matirx(set_key_list):
    """Create a square zero matrix sized len(set_key_list) + 1 (extra header row/column)."""
    size = len(set_key_list) + 1
    return [[0] * size for _ in range(size)]


def init_matrix(matrix, set_key_list):
    """
    Write the keywords into the first row and first column of the matrix.

    :param matrix: square zero matrix from build_matirx
    :param set_key_list: keywords to place as headers
    :return: the matrix with header row/column filled in
    """
    # The original converted to numpy and transposed the matrix twice just to
    # fill the first row and first column; direct assignment is equivalent.
    for idx, key in enumerate(set_key_list, start=1):
        matrix[0][idx] = key
        matrix[idx][0] = key
    return matrix


def count_matrix(matrix, formated_data):
    """
    Fill the matrix with pairwise keyword co-occurrence counts.

    Matrix layout: matrix[0][1..n] and matrix[1..n][0] hold the keywords, so
    cell [col][row] counts the documents where both header keywords appear.
    The diagonal stays 0 and the lower triangle mirrors the upper one.

    :param matrix: header-initialized matrix (see init_matrix)
    :param formated_data: per-document keyword lists (see format_data)
    :return: the filled matrix
    """
    keywordlist = matrix[0][1:]  # all keywords
    # keyword -> set of document indices containing it. Building sets ONCE
    # here means each cell below is a single intersection; the original
    # re-built both sets inside the O(n^2) loop.
    appeardict = {
        w: {i for i, line in enumerate(formated_data) if w in line}
        for w in keywordlist
    }
    for row in range(1, len(matrix)):
        for col in range(1, len(matrix)):
            if col < row:
                # lower triangle: copy the already-computed mirror cell
                matrix[col][row] = matrix[row][col]
            elif matrix[0][row] == matrix[col][0]:
                # same keyword on both axes -> diagonal is 0
                matrix[col][row] = 0
            else:
                matrix[col][row] = len(appeardict[matrix[0][row]] & appeardict[matrix[col][0]])
    return matrix


def community_discover(matrix, path):
    """
    Run four community-detection algorithms on the co-occurrence graph and
    write one membership label per keyword and algorithm to
    ``{path}_node_comm.csv``.

    :param matrix: filled co-occurrence matrix (see count_matrix)
    :param path: output file prefix
    """
    pure_key = matrix[0][1:]
    # Strip the header row/column to obtain the pure adjacency matrix.
    pure_mat = np.array([row[1:] for row in matrix[1:]])

    communities_bron = bron_kerbosch(pure_mat, pivot=True)
    print('bron complete!')
    communities_hier = hierarchical_clustering(pure_mat, metric="euclidean", linkage="complete")
    print('hier complete!')
    communities_newman, _ = girvan_newman(pure_mat)
    print('newman complete!')
    communities_louv, _ = louvain_method(pure_mat)
    print('louv complete!')

    # Map each algorithm's community sets to per-node labels. Replaces the
    # original index-based if/elif dispatch with a single data-driven loop.
    results = {'bron': communities_bron,
               'hier': communities_hier,
               'newman': communities_newman,
               'louv': communities_louv}
    columns = {'id': pure_key}
    for name, communities in results.items():
        labels = [0] * len(pure_key)  # 0 = node not assigned to any community
        for comm_i, members in enumerate(communities):
            for node in members:
                labels[node] = comm_i + 1  # community ids are 1-based
        columns[name] = labels

    df = pd.DataFrame(columns)
    df.to_csv(f'{path}_node_comm.csv', encoding='utf_8_sig', index=None)


def graph_data_output(matrix, path='graph_data'):
    """Dump the co-occurrence matrix to ``{path}.csv`` (UTF-8 with BOM)."""
    pd.DataFrame(matrix).to_csv(f'{path}.csv', encoding='utf_8_sig', index=None)


@click.command()
@click.argument('xls_dataset_route', nargs=-1)
@click.option('--xls_column', '-c', default=2)
@click.option('--threshold', '-t', default=30)
@click.option('--above', '-a', default=0.5)
@click.option('--save_csv_name', '-n', default='graph_data')
# Fix: the original declared this option as ('-hard', '-c'); the short flag
# '-c' collided with --xls_column's '-c' and clobbered it at parse time.
@click.option('--hard', '-d', default=35)
def main(xls_dataset_route, xls_column, threshold, above, save_csv_name, hard):
    """
    End-to-end pipeline: load .xls documents, select keywords, build the
    co-occurrence matrix, export the graph CSV and run community detection.

    :param xls_dataset_route: path segment(s) of the folder holding .xls files
    :param xls_column: zero-based column index holding the document text
    :param threshold: minimum keyword frequency (see get_set_key)
    :param above: maximum document-frequency ratio (see get_set_key)
    :param save_csv_name: output file prefix
    :param hard: hard cap on the number of keywords (0 disables the cap)
    :return: None
    """
    keyword_path = ''.join(xls_dataset_route)
    data = original_text_load(keyword_path, xls_column=xls_column)
    set_key_list = get_set_key(data, threshold=threshold, above=above, hard_conctrl=int(hard))
    print(len(set_key_list))
    formated_data = format_data(data, set_key_list)
    matrix = build_matirx(set_key_list)
    matrix = init_matrix(matrix, set_key_list)
    result_matrix = count_matrix(matrix, formated_data)
    graph_data_output(result_matrix, path=save_csv_name)
    community_discover(result_matrix, save_csv_name)


# Script entry point: delegate argument parsing and execution to click.
if __name__ == '__main__':
    main()
