from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from collections import Counter
from tool.es_clent import connect_es, store_data_bulk
import math
import json
from tool.string_tool import clean_des


def cal_term_weight():
    """Compute the IDF (inverse document frequency) weight of every term
    in the API-description corpus and return documents ready for ES bulk
    insertion.

    Returns:
        list[dict]: one dict per vocabulary term with keys
            "word" (str), "sim_words" (empty placeholder list) and
            "idf" (float, log10(N / document-frequency)).
    """
    # Load the corpus: one cleaned description string per API entry.
    # Explicit encoding — descriptions may contain non-ASCII text.
    with open("../data/apis.json", encoding="utf-8") as file:
        api_datas = json.load(file)
        corpus = [clean_des(api["description"]) if api["description"] else "" for api in api_datas]

    # Term-frequency matrix: rows = documents, columns = vocabulary terms.
    vectorizer = CountVectorizer(stop_words="english")
    X = vectorizer.fit_transform(corpus)

    # Vocabulary indexed by column id.
    # get_feature_names() was removed in scikit-learn 1.2; keep a fallback
    # for older versions.
    try:
        word = vectorizer.get_feature_names_out()
    except AttributeError:
        word = vectorizer.get_feature_names()

    # X.nonzero() yields one (row, col) pair per nonzero cell, so counting
    # column indices gives each term's document frequency.
    _, column_index = X.nonzero()
    doc_freq = Counter(column_index)

    # BUGFIX: the original derived the document count from the last nonzero
    # row index (row_index[-1] + 1), which undercounts whenever trailing
    # documents are empty. The true count is the number of matrix rows.
    total_doc_num = X.shape[0]

    # idf = log10(N / df)
    word_idf = {word[col]: math.log10(total_doc_num / df) for col, df in doc_freq.items()}

    # Shape the data for bulk insertion into Elasticsearch.
    return [
        {"word": term, "sim_words": [], "idf": idf}
        for term, idf in word_idf.items()
    ]


if __name__ == "__main__":
    # Compute the per-term IDF documents and bulk-index them into ES.
    es_client = connect_es()
    term_docs = cal_term_weight()
    store_data_bulk(es_object=es_client, index_name="sim_word", doc_type="sim_word", data_list=term_docs)
