import os
# from typing import List
# from mlxtend.frequent_patterns import fpgrowth
# from mlxtend.frequent_patterns import apriori
import xlrd
import logging
import time
import re
import jieba
from harvesttext import HarvestText
from harvesttext.resources import get_baidu_stopwords
from tqdm import tqdm
import jieba
from gensim import corpora
from gensim.models import LdaModel
# from gensim.corpora import Dictionary
from gensim.models import CoherenceModel
# from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# from gensim.models.doc2vec import Doc2Vec
import gensim
# from sys import getsizeof as getsize
import click

# Alias for gensim's Doc2Vec training record; unused in the code visible here —
# presumably kept for the commented-out Doc2Vec experiments. TODO confirm before removing.
TaggedDocument = gensim.models.doc2vec.TaggedDocument

# Shared HarvestText instance; used by original_text_load for text cleaning.
ht = HarvestText()


def clean(text):
    """
    Remove characters that are not Chinese, ASCII letters, digits,
    or common (CJK/ASCII) punctuation.

    :param text: input string
    :return: cleaned string
    """
    # BUGFIX: inside a character class, '^' only negates in the FIRST position.
    # The original pattern repeated '^' between ranges ("[^\u4e00-\u9fa5^a-z^A-Z^0-9…]"),
    # which made literal '^' characters survive cleaning. The stray carets are removed.
    cop = re.compile(r"[^\u4e00-\u9fa5a-zA-Z0-9,.，。！？!?()（）@《》]")
    return cop.sub('', text)


def original_text_load(path, xls_column=2):
    """
    Aggregate one column from every ``.xls`` file in *path* into a document list.

    Every sheet of every workbook contributes its column *xls_column* (header
    row dropped). Documents are then de-duplicated on the first 10 characters
    of their cleaned text, cleaned with HarvestText, and discarded when the
    cleaned text is 7 characters or shorter.

    # parameter

    path : `str`
        directory containing the .xls files

    xls_column : `int`
        zero-based column index to read from each sheet

    # return

    (clean_doc_list, clean_tag_list) : `Tuple[List[str], List[str]]`
        cleaned documents and, aligned index-by-index, the source file name
        (without the '.xls' suffix) each document came from

    """
    file_list = [f for f in os.listdir(path)
                 if not f.startswith('.') and f.endswith('.xls')]  # 去除隐藏文件无关项
    docs_list = []
    tag_list = []

    logging.info(f'开始加载数据 {path}')
    print('开始加载数据\n')
    for xls_file in tqdm(file_list):
        work_book = xlrd.open_workbook(os.path.join(path, xls_file))
        tag = xls_file.replace('.xls', '')
        for sheet in work_book.sheets():
            # NOTE(review): assumes the column cells are strings — xlrd may
            # return floats for numeric cells; confirm against the data files.
            col_data = sheet.col_values(xls_column)
            col_data.pop(0)  # 删除表头 (drop header row)
            docs_list += col_data
            # BUGFIX: append exactly one tag per row actually added. The
            # original multiplied by len(docs_list) — the CUMULATIVE count —
            # so tag_list grew far larger than docs_list (alignment of the
            # used prefix was correct only by accident).
            tag_list += [tag] * len(col_data)

    only = set()
    clean_doc_list = []
    clean_tag_list = []
    logging.info('清洗数据 开始')
    print('开始 清洗数据\n')
    for index, doc in tqdm(enumerate(docs_list)):
        key = hash(clean(doc)[:10])  # dedup key: first 10 cleaned chars (computed once)
        if key in only:
            continue
        only.add(key)
        tmp_text = ht.clean_text(doc).replace('...', '').replace('#', '')
        if len(tmp_text) <= 7:  # 小于七个中文字符直接放弃 (discard very short docs)
            continue
        clean_doc_list.append(tmp_text)
        clean_tag_list.append(tag_list[index])

    logging.info(f'全部 doc_length: {len(docs_list)}')
    logging.info(f'有效 doc_length: {len(clean_doc_list)}')
    return clean_doc_list, clean_tag_list


def LDA_topic_find(doc_list, topic_num=11, epoch=50) -> LdaModel:
    """
    Train LDA models for 1..topic_num-1 topics and return the one with the
    best c_v coherence score. LDA only produces topics; whether a topic is
    meaningful still needs human judgement.

    # parameter

    doc_list : `List[str]`
        document list

    topic_num : `int`
        exclusive upper bound on the number of topics to try
        (hyper-parameter)

    epoch : `int`
        number of training passes per model (hyper-parameter)

    # return

    lda_model : gensim.models.LdaModel
        the model with the highest coherence score

    """
    stop_word = set(get_baidu_stopwords())  # set: O(1) membership in the filter below
    cut_docs = []
    for doc in tqdm(doc_list):  # 首先进行分词 (tokenize first)
        doc = re.sub('[^\u4e00-\u9fa5]+', ' ', doc)  # keep Chinese chars only
        cut_doc = list(jieba.cut_for_search(doc))
        # 去除停用词 (drop stopwords, spaces and single-char tokens)
        cut_doc = [w for w in cut_doc if w not in stop_word and w != ' ' and len(w) >= 2]
        cut_docs.append(cut_doc)

    print(cut_docs[0])
    dictionary = corpora.Dictionary(cut_docs)
    dictionary.filter_extremes(no_below=20, no_above=0.5)  # prune rare/ubiquitous tokens
    corpus = [dictionary.doc2bow(doc) for doc in cut_docs]

    coherence_score = []
    model_list = []

    print('开始 寻找最佳主题')
    for num_topic in tqdm(range(1, topic_num, 1)):
        lda_model = LdaModel(corpus=corpus,
                             id2word=dictionary,
                             num_topics=num_topic,
                             passes=epoch)
        model_list.append(lda_model)
        coherence_model = CoherenceModel(model=lda_model,
                                         texts=cut_docs,
                                         dictionary=dictionary,
                                         coherence='c_v')
        score = coherence_model.get_coherence()  # compute once; it is expensive
        coherence_score.append(round(score, 10))
        print(f'主题 {num_topic}, 分数为：{score}')
        logging.info(f'主题 {num_topic}, 分数为：{score}')
        for topic in lda_model.print_topics(num_words=10):
            print(topic)
            logging.info(topic)

    max_score = max(coherence_score)
    max_score_index = coherence_score.index(max_score)
    logging.info(f'最大分数：{max_score}, 索引为：{max_score_index}')
    best_lda_model = model_list[max_score_index]

    best_topics = best_lda_model.print_topics(num_words=10)
    print('here is the best corr result:')
    logging.info('here is the best corr result:')
    for topic in best_topics:
        print(topic)
        logging.info(topic)

    for index, score in enumerate(coherence_score):
        print(score)
        logging.info(f'主题 {index + 1}, 分数为：{score}')

    # BUGFIX: the original returned `lda_model` — the LAST model trained in
    # the loop — not the coherence-selected `best_lda_model` it just reported.
    return best_lda_model


@click.command()
@click.argument('xls_dataset_route', nargs=-1)
@click.option('--xls_column', '-c', default=2)
@click.option('--topic_max', '-tm', default=12)
@click.option('--lda_epoch', '-e', default=50)
def main(xls_dataset_route, xls_column, topic_max, lda_epoch):
    """CLI entry point: load documents from the given folder, then search for topics."""
    # click delivers the variadic argument as a tuple — collapse it into one path string
    route = ''.join(xls_dataset_route)

    # NOTE(review): the timestamp contains ':' characters, which are invalid in
    # filenames on Windows — confirm this script only runs on POSIX systems.
    stamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
    log_name = './' + stamp + '_' + str(os.path.basename(route)) + '.log'
    logging.basicConfig(
        level=logging.DEBUG,
        filemode='a',
        filename=log_name,
        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
    )

    text_list, _ = original_text_load(path=route, xls_column=xls_column)
    print(text_list[0])
    topics = LDA_topic_find(text_list, topic_num=topic_max, epoch=lda_epoch + 1)


if __name__ == '__main__':
    main()  # click parses sys.argv and injects the argument/option values
