import jieba
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import KMeans


def seg_text(data):
    """Yield each line of *data* as a space-joined string of jieba tokens.

    Every line is stripped and lowercased before segmentation; empty
    tokens are dropped from the output.
    """
    for line in data:
        tokens = jieba.cut(line.strip().lower())
        yield ' '.join(tok for tok in tokens if tok)


def tfidf(X):
    """Return a TF-IDF weighted document-term matrix for the corpus *X*.

    *X* is an iterable of whitespace-separated token strings (the output
    of ``seg_text``). Tokens are any ``\\b\\w+\\b`` match; ``min_df=1``
    keeps every term.
    """
    counts = CountVectorizer(token_pattern=r'\b\w+\b', min_df=1).fit_transform(X)
    return TfidfTransformer().fit_transform(counts)


def kmeans(data):
    """Cluster text lines into 10 groups.

    Parameters
    ----------
    data : iterable of str
        Raw text lines; segmented and TF-IDF-vectorized internally.

    Returns
    -------
    dict
        Maps each cluster label to the list of TF-IDF row vectors
        assigned to that cluster.
    """
    X = tfidf(seg_text(data))
    model = KMeans(n_clusters=10, random_state=0)
    model.fit(X)
    res = {}
    for row, label in zip(X, model.labels_):
        # BUG FIX: the original `res[l] = res.get(l, []).append(d)`
        # stored None for every key, because list.append returns None.
        # setdefault keeps the list bound to the key while appending.
        res.setdefault(label, []).append(row)
    return res


if __name__ == '__main__':
    # Usage: python <script> <input_file>
    input_file = sys.argv[1]
    # Open with an explicit encoding: jieba implies Chinese input, and the
    # platform default codec (e.g. cp936/cp1252 on Windows) would mangle it.
    with open(input_file, encoding='utf-8') as fd:
        # Keep only non-blank lines, stripped of surrounding whitespace.
        titles = [line.strip() for line in fd if line.strip()]

    data = tfidf(seg_text(titles))
    model = KMeans(n_clusters=10, random_state=0)
    model.fit(data)
    # Emit "<title>\t<cluster label>" for every input line.
    for title, label in zip(titles, model.labels_):
        print(f'{title}\t{label}')
