import numpy as np
import time
import xlrd
from datetime import datetime
import click
import os
import xlrd
import logging
import re
import jieba
from gensim.corpora import Dictionary
from harvesttext import HarvestText
from harvesttext.resources import get_baidu_stopwords
from tqdm import tqdm
from pandas.core.frame import DataFrame
import pandas as pd
import itertools


# Module-level HarvestText instance; used by original_text_load for clean_text().
ht = HarvestText()


def clean(text):
    """
    Strip invalid characters, keeping only Chinese characters, ASCII
    letters, digits and common (full/half-width) punctuation.

    :param text: raw input string
    :return: cleaned string
    """
    # BUG FIX: the original class was "[^\u4e00-\u9fa5^a-z^A-Z^0-9,...]".
    # Only the FIRST '^' in a character class negates it; the extra '^'
    # characters were literals, so stray '^' survived the cleaning.
    cop = re.compile(r'[^\u4e00-\u9fa5a-zA-Z0-9,.，。！？!?()（）@《》]')
    return cop.sub('', text)


def original_text_load(path, xls_column=2, time_column=4):
    """
    Read and aggregate every .xls file in a folder (content exported from
    the "meiya system"), de-duplicate, clean and tokenize the documents.

    # parameter

    path : `str`
        folder containing the .xls files
    xls_column : `int`
        0-based index of the column holding the document text
    time_column : `int`
        0-based index of the column holding the timestamp

    # return

    cut_docs : `List[str]`
        space-joined segmented tokens, one string per kept document
    clean_doc_time : `List[str]`
        timestamp of each kept document (parallel to cut_docs)
    """
    # Skip hidden files and anything that is not an .xls workbook.
    file_list = [f for f in os.listdir(path)
                 if not f.startswith('.') and f.endswith('.xls')]

    docs_list = []
    time_list = []
    logging.info(f'开始加载数据 {path}')
    print('开始加载数据\n')
    for xls_file in tqdm(file_list):
        work_book = xlrd.open_workbook(os.path.join(path, xls_file))
        for sheet in work_book.sheets():
            col_data = sheet.col_values(xls_column)
            time_data = sheet.col_values(time_column)
            # [1:] drops the header row and, unlike pop(0), does not raise
            # on an empty sheet. extend() is O(k); the original rebuilt the
            # whole list with "list + list" per sheet -> O(n^2) overall.
            docs_list.extend(col_data[1:])
            time_list.extend(time_data[1:])

    seen = set()
    clean_doc_list = []
    clean_doc_time = []
    logging.info('清洗数据 开始')
    print('开始 清洗数据\n')
    for index, doc in tqdm(enumerate(docs_list)):
        # De-duplicate on the first 10 cleaned characters; compute the
        # fingerprint once (the original hashed the cleaned prefix twice).
        fingerprint = hash(clean(doc)[:10])
        if fingerprint in seen:
            continue
        seen.add(fingerprint)
        tmp_text = ht.clean_text(doc).replace('...', '').replace('#', '')
        if len(tmp_text) <= 7:  # drop documents of 7 characters or fewer
            continue

        clean_doc_list.append(tmp_text)
        clean_doc_time.append(time_list[index])

    logging.info(f'全部 doc_length: {len(docs_list)}')
    logging.info(f'有效 doc_length: {len(clean_doc_list)}')

    # Keep stopwords in a set: O(1) membership per token (the original used
    # a list, costing O(len(stopwords)) per token).
    stop_words = set(get_baidu_stopwords())
    cut_docs = []
    for doc in tqdm(clean_doc_list):  # word segmentation
        doc = re.sub('[^\u4e00-\u9fa5]+', ' ', doc)  # keep Chinese only
        tokens = [t for t in jieba.cut_for_search(doc)
                  if t not in stop_words and t != ' ' and len(t) >= 2]
        cut_docs.append(' '.join(tokens))  # space-joined for get_set_key

    return cut_docs, clean_doc_time


def get_set_key(data, threshold=30, above=0.5, hard_conctrl=0):
    """
    Select the keyword set used as the first row/column of the
    co-occurrence matrix: words whose corpus frequency is >= `threshold`
    and whose document-frequency ratio is <= `above`.

    :param data: list of space-joined token strings (from original_text_load)
    :param threshold: minimum total frequency for a word to be kept
    :param above: maximum fraction of documents a word may appear in
        (filters out near-ubiquitous words)
    :param hard_conctrl: if non-zero, keep only this many top-frequency words
    :return: keywords sorted by total frequency, descending
    """
    key_list = [tok.strip() for tok in ' '.join(data).strip().split(' ')]
    # Single O(n) counting pass; the original called list.count() once per
    # unique word, which is O(n * vocabulary_size) overall.
    freq = {}
    for tok in key_list:
        freq[tok] = freq.get(tok, 0) + 1

    # Document frequency of each word as a fraction of the corpus size.
    dict_all = Dictionary([line.split(' ') for line in data])
    n_docs = len(data)
    proportion = {tok: dict_all.dfs[dict_all.token2id[tok]] / n_docs
                  for tok in freq}

    wf = {tok: cnt for tok, cnt in freq.items()
          if tok != '' and cnt >= threshold and proportion[tok] <= above}
    set_key_list = [tok for tok, _ in
                    sorted(wf.items(), key=lambda item: item[1], reverse=True)]

    if hard_conctrl:
        set_key_list = set_key_list[:hard_conctrl]
    return set_key_list


def format_data(data, set_key_list):
    """
    Convert each space-joined token string into the list of distinct
    keywords it contains.

    :param data: list of space-joined token strings
    :param set_key_list: keywords to keep
    :return: list of per-document keyword lists (deduplicated; element
        order within each list is unspecified, as in the original)
    """
    # Build a set once: O(1) membership per token. The original tested
    # membership against the keyword *list*, O(len(set_key_list)) per token.
    key_set = set(set_key_list)
    formated_data = []
    for line in data:
        kept = {tok for tok in line.split(' ')
                if tok in key_set and tok != ''}
        formated_data.append(list(kept))
    return formated_data


def graph_data_output(matrix, path='graph_data'):
    """Dump a co-occurrence matrix to `<path>.csv` (UTF-8 with BOM, Excel-friendly)."""
    pd.DataFrame(matrix).to_csv(f'{path}.csv', encoding='utf_8_sig')


def timed_edge_count(doc_list, key_list, time_data, path):
    """
    Emit one timestamped edge per keyword pair co-occurring in a document
    and write the edge list to `<path>.csv` with Source/Target/Timestamp
    columns (Gephi-style).

    :param path: output file name without the .csv extension
    :param time_data: per-document timestamp strings ('%Y-%m-%d %H:%M:%S')
    :param doc_list: list of per-document keyword lists
    :param key_list: keywords allowed as edge endpoints
    :return: None (writes the csv as a side effect)
    """
    # Set membership is O(1) inside the pair loop; the original scanned the
    # keyword list for every combination.
    key_set = set(key_list)
    label_start = []
    label_end = []
    time_list = []

    for index, doc in tqdm(enumerate(doc_list)):
        for a, b in itertools.combinations(doc, 2):
            if a not in key_set or b not in key_set:
                continue
            label_start.append(a)
            label_end.append(b)
            time_list.append(time_data[index])

    # '2020-01-02 03:04:05' -> '2020-01-02T03:04:05Z'
    # NOTE(review): the 'Z' suffix asserts UTC — source sheet timezone is
    # not visible here; confirm upstream.
    format_time = [t.replace(' ', 'T') + 'Z' for t in time_list]

    df = DataFrame({'Source': label_start,
                    'Target': label_end,
                    'Timestamp': format_time})
    df.to_csv(f'{path}.csv', encoding='utf_8_sig', index=None)


def timed_node_find(doc_list, key_list, time_data, path):
    """
    Compute each keyword's first and last occurrence time and write them
    to `<path>_node.csv` (columns: id, startStamp, endStamp).

    :param doc_list: list of per-document keyword lists
    :param key_list: keywords to report
    :param time_data: per-document timestamp strings ('%Y-%m-%d %H:%M:%S')
    :param path: output file name prefix
    :return: None (writes the csv as a side effect)
    """
    time_format = '%Y-%m-%dT%H:%M:%SZ'
    time_format_sheet = '%Y-%m-%d %H:%M:%S'
    # Sentinel extremes: any real timestamp lies between year 1000 and 2222.
    # Keywords never seen in doc_list keep the sentinel values, as before.
    initial_start = datetime.strptime('2222-01-01T00:00:00Z', time_format)
    initial_end = datetime.strptime('1000-01-01T00:00:00Z', time_format)
    start_key = {key: initial_start for key in key_list}
    end_key = {key: initial_end for key in key_list}

    for index, doc in enumerate(doc_list):
        # Parse the document timestamp ONCE; the original re-parsed the
        # same string for every word and every comparison (up to 4x/word).
        stamp = datetime.strptime(time_data[index], time_format_sheet)
        for word in doc:
            if end_key[word] < stamp:
                end_key[word] = stamp
            if start_key[word] > stamp:
                start_key[word] = stamp

    start_col = [datetime.strftime(start_key[key], time_format)
                 for key in key_list]
    end_col = [datetime.strftime(end_key[key], time_format)
               for key in key_list]

    df = DataFrame({
        'id': key_list,
        'startStamp': start_col,
        'endStamp': end_col
    })
    df.to_csv(f'{path}_node.csv', encoding='utf_8_sig', index=None)
    print('node save!')


@click.command()
@click.argument('xls_dataset_route', nargs=-1)
@click.option('--xls_column', '-c', default=2)
@click.option('--threshold', '-t', default=30)
@click.option('--above', '-a', default=0.5)
@click.option('--save_csv_name', '-n', default='graph_data')
# BUG FIX: this option was declared as ('-hard', '-c'). The short flag '-c'
# collided with --xls_column's, silently rebinding '-c' to the hard cap, and
# the single-dash long name is non-standard. Renamed to --hard / -d.
@click.option('--hard', '-d', default=35)
def main(xls_dataset_route, xls_column, threshold, above, save_csv_name, hard):
    """
    End-to-end pipeline: load xls documents, select keywords, then export
    a timed edge list and node table for graph visualisation.

    :param xls_dataset_route: path fragment(s) of the xls folder (joined)
    :param xls_column: column index of the document text
    :param threshold: minimum keyword frequency (see get_set_key)
    :param above: maximum document-frequency ratio (see get_set_key)
    :param save_csv_name: output file name prefix
    :param hard: hard cap on the number of keywords kept
    :return: None
    """
    keyword_path = ''.join(xls_dataset_route)
    data, time_data = original_text_load(keyword_path, xls_column=xls_column)
    set_key_list = get_set_key(data, threshold=threshold, above=above,
                               hard_conctrl=int(hard))
    print(len(set_key_list))
    formated_data = format_data(data, set_key_list)
    timed_edge_count(formated_data, set_key_list, time_data, save_csv_name)
    timed_node_find(formated_data, set_key_list, time_data, save_csv_name)


if __name__ == '__main__':
    main()
