#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/4/27 下午4:58
# @Author  : zhangzhen
# @Site    : 
# @File    : kmeans.py
# @Software: PyCharm
import sys
import matplotlib.pyplot as plt
from collections import defaultdict
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from com.corpus.corpus_utils import corpus_utils
from com.utils.ioutils import ioutils
from com.corpus import corpus
from com.textrank4zh.TextRank4Sentence import TextRank4Sentence
try:
    # Python 2 only: force UTF-8 as the process-wide default codec so that
    # implicit str<->unicode conversions of the Chinese corpus text don't
    # raise UnicodeDecodeError. `reload` must be called first because the
    # `site` module deletes `sys.setdefaultencoding` at startup.
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    # On Python 3, `reload` is not a builtin -> NameError; the shim is
    # deliberately skipped there.
    pass


def train(X, vectorizer, true_k=10, minibatch=False, showLable=False, save_plt=False):
    """Cluster the tf-idf matrix *X* with k-means and return (inertia, labels).

    Parameters:
        X: sparse tf-idf document-term matrix (output of transform()).
        vectorizer: the fitted TfidfVectorizer, used to map centroid
            columns back to the vocabulary terms.
        true_k: number of clusters.
        minibatch: if True use MiniBatchKMeans (sampled batches) instead of
            the full-batch KMeans.
        showLable: if True print the 20 highest-weighted terms per centroid.
        save_plt: if True project the documents with PCA and save/show a
            2-D scatter plot of the clustering.

    Returns:
        (score, result): score is -km.score(X), i.e. the clustering inertia
        (lower is better); result is the per-document cluster-label list.
    """
    # Choose between sampled (mini-batch) and full-data k-means training.
    if minibatch:
        km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                             init_size=1000, batch_size=1000, verbose=False)
    else:
        km = KMeans(n_clusters=true_k, init='k-means++', max_iter=300, n_init=1, verbose=False)
    km.fit(X)

    result = list(km.predict(X))
    # Cluster-size distribution: {label: document count}.
    print(dict([(i, result.count(i)) for i in result]))

    if showLable:
        print('Cluster distribution:')
        print("Top terms per cluster:")
        # Centroid columns sorted by descending tf-idf weight.
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]
        terms = vectorizer.get_feature_names()
        print(vectorizer.get_stop_words())  # configured stop words (None by default)
        for i in range(true_k):
            # One line per cluster with its 20 heaviest terms (portable
            # replacement for the old Python-2 trailing-comma prints).
            top_terms = ' '.join(terms[ind] for ind in order_centroids[i, :20])
            print('Cluster  %d: %s' % (i, top_terms))
        print('--------------------------------------------------------------------------------------------------')

    if save_plt:
        weight = X.toarray()
        pca = PCA(n_components=4)  # keep 4 components; only the first 2 are plotted
        newData = pca.fit_transform(weight)
        points_dict_x = defaultdict(list)
        points_dict_y = defaultdict(list)
        # Group the first two PCA coordinates of each document by its cluster label.
        for i, label in enumerate(result):
            points_dict_x[label].append(newData[i][0])
            points_dict_y[label].append(newData[i][1])
        # Marker+color style strings for matplotlib's plot() format argument.
        colors = ['ob', 'or', 'og', 'ok', 'om', 'oc', 'oy',
                  '^b', '^r', '^g', '^k', '^m', '^c', '^y',
                  '*b', '*r', '*g', '*k', '*m', '*c', '*y']
        for i in range(true_k):
            # Cycle the styles so true_k > len(colors) no longer raises IndexError.
            plt.plot(points_dict_x[i], points_dict_y[i], colors[i % len(colors)])
        plt.savefig('knn_N%s_K%s.png' % (str(1000), str(true_k)), bbox_inches='tight', dpi=200)
        plt.show()

    return -km.score(X), result


def transform(dataset, n_features=1000):
    """Fit a tf-idf vectorizer on *dataset*, capped at *n_features* terms.

    Returns the (document-term matrix, fitted vectorizer) pair.
    """
    tfidf = TfidfVectorizer(max_df=0.9, max_features=n_features, min_df=1, use_idf=True)
    matrix = tfidf.fit_transform(dataset)
    return matrix, tfidf


def kmeans(corpus, true_k=1, save_plt=False):
    """Vectorize *corpus*, run k-means with *true_k* clusters (printing the
    top centroid terms), and print the per-document average inertia."""
    features, vec = transform(corpus, n_features=1000)
    score, types = train(features, vec, true_k=true_k, showLable=True, save_plt=save_plt)
    avg_score = score / len(corpus)
    print (avg_score)


def scan_k(corpus, type):
    """Sweep the cluster count k from 2 to 79 over *corpus* and save an
    error-vs-k curve as scan_N1000_K<type>.png.

    Parameters:
        corpus: list of documents (strings) to vectorize and cluster.
        type: string tag appended to the output file name.
    """
    print("%d documents" % len(corpus))
    X, vectorizer = transform(corpus, n_features=1000)
    true_ks = []
    scores = []
    for k in range(2, 80, 1):  # range (not py2 xrange) so this parses on py3 too
        score, types = train(X, vectorizer, true_k=k)
        # Normalize the inertia by corpus size for comparability across k.
        score = score / len(corpus)
        print(k, score)
        true_ks.append(k)
        scores.append(score)
    plt.figure(figsize=(8, 4))
    plt.plot(true_ks, scores, label="error", color="red", linewidth=1)
    plt.xlabel("n_clusters")  # the x-axis is k, not the feature count
    plt.ylabel("error")
    plt.legend()
    plt.savefig('scan_N%s_K%s.png' % (str(1000), type), bbox_inches='tight', dpi=200)
    # plt.show()


def showdist(corpus, types):
    """Scatter-plot the documents of *corpus* in 2-D PCA space, colored by
    their cluster label, and save the figure.

    Parameters:
        corpus: list of documents (strings).
        types: per-document cluster labels, aligned with *corpus*;
            assumed to be contiguous integers starting at 0 -- TODO confirm
            against the caller, since the plot loop indexes clusters 0..n-1.
    """
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000, min_df=2, use_idf=True)
    X = vectorizer.fit_transform(corpus)
    weight = X.toarray()
    pca = PCA(n_components=2)  # project down to 2-D for plotting
    newSet = pca.fit_transform(weight)

    points_dict_x = defaultdict(list)
    points_dict_y = defaultdict(list)

    # Group the 2-D coordinates of each document by its cluster label.
    for i, label in enumerate(types):
        points_dict_x[label].append(newSet[i][0])
        points_dict_y[label].append(newSet[i][1])
    t_len = len(points_dict_y)
    # Marker+color style strings for matplotlib's plot() format argument.
    colors = ['ob', 'or', 'og', 'ok', 'om', 'oc', 'oy',
              '^b', '^r', '^g', '^k', '^m', '^c', '^y',
              '*b', '*r', '*g', '*k', '*m', '*c', '*y']

    for i in range(t_len):
        # Cycle the styles so more than 21 clusters no longer raises IndexError.
        plt.plot(points_dict_x[i], points_dict_y[i], colors[i % len(colors)])
    plt.savefig('knn_N%s_K%s.png' % (str(1000), str(t_len)), bbox_inches='tight', dpi=200)
    plt.show()


def kmeans_improve(corpus, type,size=20, alpha=0.15, times=100):
    """Repeatedly k-means-cluster *corpus* until it is partitioned into
    groups of roughly *size* documents each, then print/save the result.

    Parameters:
        corpus: project corpus object -- must provide get_len(), get_corpus(),
            refresh(), refresh_corpus(), get_cur_index(), plus what
            result_show() needs (see com.corpus.corpus).
        type: string tag used for the output file names in result_show().
        size: target number of documents per cluster.
        alpha: tolerance -- a cluster is accepted when its size lies in
            [size*(1-alpha), size*(1+alpha)].
        times: number of k-means restarts attempted per round.

    NOTE(review): Python-2-only code (print statements, dict.iteritems,
    integer division for k); `result`/`rs` leak out of the inner while-loop
    and are reused below -- preserve statement order when porting.
    """
    rest_flag = True  # True while leftover (not yet accepted) documents remain
    stat_num = 0  # consecutive rounds that failed to yield an acceptable cluster
    while corpus.get_len() > size:
        X, vectorizer = transform(corpus.get_corpus())
        # Train k-means on the raw remaining documents.
        # print "corpus length: ", self.__corpus.get_len()
        k = corpus.get_len()/size  # Python-2 integer division
        if k * size < corpus.get_len():
            k = k+1  # round up so every document fits into some cluster
        # Build the clusterer for this round.
        km = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=1, verbose=False)
        time = 0
        cur_result = []
        cur_count = 0
        flag = True
        while time < times:
            time = time + 1
            # Re-fit and keep the labelling with the most size-conforming clusters.
            km.fit(X)
            result = list(km.predict(X))
            rs = dict([(i, result.count(i)) for i in result])
            tmp = [k for k, v in rs.iteritems() if size * (1 - alpha) <= v <= size * (1 + alpha)]
            if len(tmp) > cur_count:
                flag = False
                cur_result = result
                cur_count = len(tmp)
        if flag:  # only one cluster, or no restart met the size constraint
            print u'只有一类的情况 或者 不满足聚类规模限定'
            # rs = dict([(i, result.count(i)) for i in result])
            print rs
            tmp = [k for k, v in rs.iteritems() if size * (1 - alpha) <= v <= size * (1 + alpha)]
            if len(tmp) > 0:
                corpus.refresh([result])
                rest_flag = False
                stat_num = 0
                break
            else:
                # Size constraint not met: find the largest cluster and keep splitting it.
                cur_dict = defaultdict(list)
                for k, v in enumerate(result):
                    cur_dict[v].append(k)
                cur_count = 0
                cur = 0
                for k, v in cur_dict.iteritems():
                    if len(v) > cur_count:
                        cur_count = len(v)
                        cur = k

                stat_num += 1
                # After 5 fruitless rounds, give up and accept the largest cluster as-is.
                if stat_num > 5:
                    corpus.refresh([cur_dict[cur]])
                    rest_flag = False
                    break
                else:
                    corpus.refresh_corpus(cur_dict[cur])
                pass
        else:  # some clusters fit: accept them and continue on the remainder
            cur_dict = defaultdict(list)
            for k, v in enumerate(cur_result):
                cur_dict[v].append(k)
            # Collect the document-index lists of clusters within the size window.
            res = []
            for k, v in cur_dict.iteritems():
                if size * (1 - alpha) <= len(v) <= size * (1 + alpha):
                    res.append(v)
            corpus.refresh(res)

    if rest_flag:  # leftover documents: emit them as one final cluster
        res = []
        res.append([k for k, v in enumerate(corpus.get_cur_index())])
        corpus.refresh(res)

    result_show(corpus, type)

def result_show(corpus,type):
    """Print every cluster with its TextRank key sentences and write the
    labelled texts plus central sentences under ../../data/.

    Parameters:
        corpus: project corpus object providing get_cluster_result() and
            get_pos_corpus_by_indies() (see com.corpus.corpus).
        type: string tag used in the two output file names.
    """
    total = 0
    texts = []       # "<cluster>\t<document>" lines for the result file
    cent_sents = []  # "<cluster>\t<weight>\t<sentence>" lines for the summary file
    for cur, indices in enumerate(corpus.get_cluster_result()):
        pos_corpus = corpus.get_pos_corpus_by_indies(indices)
        for c in pos_corpus:
            texts.append(str(cur) + '\t' + c)
        # Single-argument print() calls keep the old Python-2 output while
        # letting the function parse on Python 3 as well.
        print("\n第%s类 个数%d" % (cur, len(pos_corpus)))
        # Extract the 5 most central sentences of the cluster via TextRank.
        text = ' '.join(pos_corpus)
        tr4s = TextRank4Sentence()
        tr4s.analyze(text=text, lower=True, source='all_filters')

        print("Sentences:")
        for item in tr4s.get_key_sentences(num=5):
            cent = str(cur) + '\t' + str(item.weight) + '\t' + item.sentence
            cent_sents.append(cent)
            print('%s \t %s \t %s' % (cur, item.weight, item.sentence))
        total += len(pos_corpus)
        print(100 * "=")
    ioutils.write2loacl(texts, '../../data/' + type + '_result_improve.txt')
    ioutils.write2loacl(cent_sents, '../../data/cent_' + type + '.txt')
    print("total, %s" % total)

if __name__ == '__main__':
    # Entry point: cluster corpus #0 into groups of ~100 documents (+/-25%).
    print("========================================Main========================================")
    cluster_type = 0  # renamed from `type` to avoid shadowing the builtin
    c = corpus.corpus('../../data/', str(cluster_type))
    kmeans_improve(c, str(cluster_type), size=100, alpha=0.25)


