from gensim import corpora
from gensim import models

import similarity.jiebaSegmentationWord as jiebaSegmentationWord
import db.db_init as db
import common.invertedIndex as invertedIndex

"""
@:param corpus_file :语料库；需带读写模式：如r"corpus.txt"
"""


def caculatetfidf(corpus_file=r"corpus.txt"):
    """Compute TF-IDF vectors for every document in a text corpus file.

    :param corpus_file: path to the corpus file, one document per line (UTF-8)
    :return: tuple of (vec_tfidf, original_word_list, tfidf, dictionary, num_features)
        where vec_tfidf is a list of per-document TF-IDF sparse vectors,
        original_word_list the stripped raw lines, tfidf the trained model,
        dictionary the gensim Dictionary, and num_features its vocabulary size.
    """
    # Read the corpus; 'with' guarantees the file handle is closed
    # (the original opened without closing — a resource leak).
    with open(corpus_file, "r", encoding="utf-8") as f:
        corpus = f.readlines()

    # Keep the raw (stripped) text and tokenize each document.
    original_word_list = [line.strip() for line in corpus]
    word_list = [jiebaSegmentationWord.cut(line) for line in corpus]

    # Bag-of-words representation over the shared dictionary.
    dictionary = corpora.Dictionary(word_list)
    num_features = len(dictionary.token2id)
    new_corpus = [dictionary.doc2bow(words) for words in word_list]

    # Train the TF-IDF model on the sparse bag-of-words corpus.
    tfidf = models.TfidfModel(new_corpus)
    # Persist so a later run with unchanged data can load instead of retrain.
    # (The original immediately re-loaded the file it had just saved — a
    # behavior-free round trip — so the load is removed.)
    tfidf.save("model/my_model.tfidf")

    # One TF-IDF sparse vector per document, in corpus order.
    vec_tfidf = [tfidf[bow] for bow in new_corpus]
    return vec_tfidf, original_word_list, tfidf, dictionary, num_features


def calculate_tdidf_from_db(text, create_time, loadcache=False):
    """Compute TF-IDF vectors for database-backed corpus entries matching `text`.

    :param text: query text used to pre-filter the corpus via the inverted index
    :param create_time: passed through to db.get_corpus_from_db to select the corpus
    :param loadcache: if True, load the previously saved TF-IDF model instead of retraining
    :return: tuple of (vector_tfidf, corpus, tfidf, dictionary, num_features, word_list)
    """
    # Fetch corpus records (each with an id and content) from the database,
    # then narrow them to entries relevant to `text` via the inverted index.
    corpus = db.get_corpus_from_db(create_time)
    corpus = invertedIndex.query_by_field_id(text, corpus, "content")

    # Tokenize each record's content field.
    word_list = [jiebaSegmentationWord.cut(corpu["content"]) for corpu in corpus]

    # Build the bag-of-words corpus over a shared dictionary.
    dictionary = corpora.Dictionary(word_list)
    num_features = len(dictionary.token2id)
    new_corpus = [dictionary.doc2bow(word) for word in word_list]

    if loadcache:
        # Reuse the cached model. The original trained a fresh model here and
        # then threw it away by loading over it — wasted work; now we only
        # train when the cache is not used.
        tfidf = models.TfidfModel.load("model/my_model.tfidf")
    else:
        tfidf = models.TfidfModel(new_corpus)
        # Persist so a later call with loadcache=True can skip retraining.
        tfidf.save("model/my_model.tfidf")
    print("tfidf模型构建完成")

    # Apply the model to the whole corpus at once (gensim supports corpus-level
    # transformation, yielding one TF-IDF vector per document).
    vector_tfidf = tfidf[new_corpus]
    return vector_tfidf, corpus, tfidf, dictionary, num_features, word_list
