import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from gensim import corpora,models
from scipy.sparse import csr_matrix

# Load the preprocessed train/test splits.
df_train = pd.read_csv('../data/train_processed.csv')
# df_train = pd.read_csv('../data/test.csv')  # stale debug line from an earlier experiment
df_test = pd.read_csv('../data/test_processed.csv')
# Bar chart of the label distribution in the training set.
sns.countplot(x='label',data=df_train)
plt.savefig('../data/label_count.png')

# Keyword extraction runs over train and test descriptions combined.
# NOTE(review): assumes each 'description' is already a whitespace-separated
# token string — confirm against the preprocessing step.
all_description = pd.concat([df_train['description'],df_test['description']],  axis=0)
corpus_list_all = all_description.map(lambda x: x.strip().split(' ')).tolist()
# Merge all descriptions sharing a label into one long document per label.
df_class = df_train[['label', 'description']].groupby(['label']).aggregate(lambda x: ' '.join(x))
corpus_list_class = df_class['description'].map(lambda x: x.strip().split(' ')).tolist()

def getKeywords(corpus_list, corpus_name, top_k, min_docfreq=3, num_topics=100):
    """Extract per-document TF-IDF keywords and save an LSI embedding matrix.

    Builds a gensim dictionary over ``corpus_list`` (dropping tokens that
    occur in fewer than ``min_docfreq`` documents), scores tokens with
    TF-IDF, writes a dense LSI projection of the corpus to
    ``../model/<corpus_name>_lsi.csv``, and returns, for each document, its
    ``top_k`` highest-TF-IDF tokens joined by single spaces.

    Args:
        corpus_list: tokenized documents, each a list of str tokens.
        corpus_name: basename for the saved LSI matrix file.
        top_k: maximum number of keywords kept per document.
        min_docfreq: minimum document frequency a token needs to survive.
        num_topics: dimensionality of the LSI projection.

    Returns:
        list[str]: one space-joined keyword string per input document.
    """
    dictionary = corpora.Dictionary(corpus_list)
    # Drop rare tokens, then re-pack ids so they are contiguous again.
    rare_ids = [tid for tid, docfreq in dictionary.dfs.items() if docfreq < min_docfreq]
    dictionary.filter_tokens(rare_ids)
    dictionary.compactify()
    # gensim fills id2token lazily, so it can be empty here; build it
    # explicitly for the reverse lookups below.
    dictionary.id2token = {tid: token for token, tid in dictionary.token2id.items()}

    corpus = [dictionary.doc2bow(text) for text in corpus_list]
    # When a dictionary is supplied, TfidfModel derives idf stats from it
    # directly — no corpus/id2word arguments needed.
    tfidf_model = models.TfidfModel(dictionary=dictionary)
    corpus_tfidf = tfidf_model[corpus]

    lsi_model = models.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=num_topics)
    # BUG FIX: project the TF-IDF-weighted corpus — what the model was
    # trained on — not the raw bag-of-words counts.
    corpus_lsi = lsi_model[corpus_tfidf]
    data, rows, cols = [], [], []
    for row_idx, doc_topics in enumerate(corpus_lsi):
        for topic_id, weight in doc_topics:
            rows.append(row_idx)
            cols.append(topic_id)
            data.append(weight)
    # Pin the shape so trailing all-zero topic columns (and trailing empty
    # documents) are not silently dropped by the sparse constructor.
    lsi_matrix = csr_matrix((data, (rows, cols)), shape=(len(corpus), num_topics)).toarray()
    np.savetxt('../model/' + corpus_name + '_lsi.csv', lsi_matrix, delimiter=',')

    keywords = []
    for doc in corpus_tfidf:
        # Highest TF-IDF weight first; ties broken by token id for determinism.
        ranked = sorted(doc, key=lambda t: (-t[1], t[0]))
        keywords.append(' '.join(dictionary.id2token[tid] for tid, _ in ranked[:top_k]))
    return keywords
# Run keyword extraction for both corpora and persist each result as a
# single-column CSV of space-joined keywords.
extraction_jobs = [
    (corpus_list_all, 'all', 16, '../data/keywords_all.csv'),
    (corpus_list_class, 'class', 50, '../data/keywords_class.csv'),
]
for corpus, name, k, out_path in extraction_jobs:
    frame = pd.DataFrame({"keywords": getKeywords(corpus, corpus_name=name, top_k=k)})
    frame.to_csv(out_path, index=False, encoding='utf-8')