import json
from sklearn import feature_extraction
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from domain.default_news import news_after_treat_data


def get_matrix():
    """Load tokenized news documents from the local SQLite database.

    Each ``news_after_treat_data`` row stores a JSON-encoded list of tokens
    in its ``json`` column; the tokens are joined with spaces so each
    document becomes a single whitespace-separated string suitable for
    ``CountVectorizer``.

    :return: list of space-joined token strings, one per news item
    """
    engine = create_engine('sqlite:///database/news.db')
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        news_list = session.query(news_after_treat_data).all()
    finally:
        # Ensure the session is released even if the query fails.
        session.close()
    # NOTE(review): assumes each row's `json` column is a JSON array of
    # token strings — confirm against the writer of news_after_treat_data.
    return [' '.join(json.loads(news.json)) for news in news_list]

def get_tf_idf(matrix):
    """Compute the TF-IDF weight matrix for a corpus of token strings.

    :param matrix: iterable of documents, each a whitespace-separated
        token string (as produced by :func:`get_matrix`)
    :return: dense 2-D array where element [i][j] is the TF-IDF weight of
        term j in document i
    """
    # Convert text to a term-frequency matrix, capped at 5000 features,
    # using unigrams and bigrams.
    vectorizer = CountVectorizer(max_features=5000, ngram_range=(1, 2))
    # TfidfTransformer turns raw counts into TF-IDF weights.
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(vectorizer.fit_transform(matrix))
    # Removed the unused `word = vectorizer.get_feature_names()` call:
    # the result was never used, and get_feature_names() was removed in
    # scikit-learn 1.2 (replaced by get_feature_names_out()), so the dead
    # line would crash on modern sklearn versions.
    return tfidf.toarray()


def start_kmeans_cluster(matrix, n_clusters):
    """Fit a KMeans model on the given feature matrix.

    :param matrix: 2-D array of sample feature vectors (e.g. TF-IDF weights)
    :param n_clusters: number of clusters to form
    :return: the fitted ``KMeans`` estimator (labels_, cluster_centers_,
        inertia_ are available on the result)
    """
    model = KMeans(n_clusters, random_state=10)
    return model.fit(matrix)


if __name__ == '__main__':
    # Build the corpus from the database, vectorize it, then cluster.
    corpus = get_matrix()
    weights = get_tf_idf(corpus)
    fitted = start_kmeans_cluster(weights, 19)
    # Pull the standard KMeans outputs off the fitted estimator.
    labels = fitted.labels_                    # cluster label per document
    cluster_centers = fitted.cluster_centers_  # centroid vectors
    inertia = fitted.inertia_                  # within-cluster sum of squares
