import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import KMeans
import operator


def seg_text(data):
    """Lazily tokenize each text with jieba.

    For every string in *data*, lowercase and strip it, segment it with
    jieba, and yield the tokens joined by single spaces (the whitespace
    format CountVectorizer expects).
    """
    for line in data:
        tokens = jieba.cut(line.strip().lower())
        yield ' '.join(tok for tok in tokens if tok)


def tfidf(x):
    """Vectorize an iterable of whitespace-tokenized documents into a
    TF-IDF weighted sparse term-document matrix.

    The token pattern ``\\b\\w+\\b`` keeps single-character tokens
    (CountVectorizer's default drops them), which matters for Chinese
    segmentation output.
    """
    counts = CountVectorizer(token_pattern=r'\b\w+\b', min_df=1).fit_transform(x)
    return TfidfTransformer().fit_transform(counts)


def kmeans(text, reads, urls, n_cluster=10):
    """Cluster titles by TF-IDF similarity and summarize read counts.

    Args:
        text: sequence of title strings.
        reads: sequence of read counts, aligned with ``text``.
        urls: sequence of URLs, aligned with ``text``.
        n_cluster: number of KMeans clusters to use when there are more
            titles than clusters; otherwise each title is its own cluster.

    Returns:
        A list of ``{'title_read': [...], 'sum_read': total}`` dicts, one
        per cluster, sorted by total reads descending. Each
        ``title_read`` entry is ``{'title', 'read_num', 'url'}``, sorted
        by read count descending within the cluster.
    """
    data = tfidf(seg_text(text))
    if len(text) > n_cluster:
        model = KMeans(n_clusters=n_cluster, random_state=0)
        model.fit(data)
        labels = model.labels_
    else:
        # Fewer titles than clusters: KMeans cannot fit, so treat every
        # title as a singleton cluster.
        labels = range(len(text))

    # Group (title, read_num, url) triples by cluster label.
    # setdefault(...).append(...) is O(1) per item, unlike rebuilding the
    # list with ``res.get(k, []) + [...]`` which is quadratic overall.
    clusters = {}
    for title, label, read_num, url in zip(text, labels, reads, urls):
        clusters.setdefault(str(label), []).append((title, read_num, url))

    # Per cluster: entries sorted by read count (desc) plus the total reads.
    summaries = []
    for members in clusters.values():
        ranked = [{'title': t, 'read_num': r, 'url': u}
                  for t, r, u in sorted(members, key=operator.itemgetter(1), reverse=True)]
        summaries.append((ranked, sum(m[1] for m in members)))

    # Order clusters by total read count, highest first.
    return [{'title_read': entries, 'sum_read': total}
            for entries, total in sorted(summaries, key=operator.itemgetter(1), reverse=True)]
