#-*- coding: UTF-8 -*-
from __future__ import unicode_literals
__author__ = 'Jinkey'

import scipy as sp
import scipy.sparse as spa
import nltk.stem

txts = ['This is a toy post about machine learning. Actually, it contains not much interesting stuff.', 'Imaging databases provide storage capabilities.', 'Most imaging databases safe images permanently.', 'Imaging databases store data.', 'Imaging databases store data. Imaging databases store data. Imaging databases store data.','Imaging databases can get huge.','hello Imaging databases!','What is databases?']
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.cluster import KMeans


# Shared English Snowball stemmer used by the custom vectorizers below.
english_stemmer = nltk.stem.SnowballStemmer('english')
class StemmedCountVectorizer(CountVectorizer):
    """CountVectorizer whose analyzer stems every token with the module-level
    English Snowball stemmer before counting."""
    def build_analyzer(self):
        # Start from the analyzer the base class would normally produce.
        base_analyzer = super(StemmedCountVectorizer, self).build_analyzer()

        def stemmed_analyzer(doc):
            # Lazily stem each token emitted by the base analyzer.
            return (english_stemmer.stem(token) for token in base_analyzer(doc))

        return stemmed_analyzer

# [Alternative] custom TF-IDF vectorizer with stemming support.
# (The redundant re-assignment of english_stemmer that used to sit here was
# removed; the stemmer is already created above.)
class StemmedTfidfCountVectorizer(TfidfVectorizer):
    """TfidfVectorizer whose analyzer stems every token with the module-level
    English Snowball stemmer before weighting."""
    def build_analyzer(self):
        # BUG FIX: the original called super(StemmedCountVectorizer, self),
        # naming a class this one does not inherit from — that raises
        # "TypeError: super(type, obj): obj must be an instance or subtype of
        # type" as soon as the analyzer is built. Use this class's own name.
        analyzer = super(StemmedTfidfCountVectorizer, self).build_analyzer()
        # Stem each token produced by the base analyzer.
        return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))

# Instantiate the stemming CountVectorizer with a minimum document-frequency
# threshold and English stop-word removal.
vectorizer = StemmedCountVectorizer(min_df=1, stop_words='english')
# [Alternative] instantiate the stemming TF-IDF vectorizer the same way.
tfidfvectorizer = StemmedTfidfCountVectorizer(min_df=1, stop_words='english')

# Fit on the corpus: produces a sparse (documents x features) term matrix.
txt_train = vectorizer.fit_transform(txts)

# BUG FIX: scipy sparse .shape is (n_rows, n_cols) = (documents, features);
# the original unpacked it as `col, row`, swapping the two. Neither name is
# used later, but the swapped naming would mislead anyone extending the script.
row, col = txt_train.shape

# Scratch snippet showing how to turn a dense matrix into a sparse COO matrix
# (kept for reference; unrelated to the rest of the script).
# txt_trains = [[0,1,1],
#               [1,2,3]]
# txt_trains = spa.coo_matrix(txt_trains)

# Print the learned (stemmed, stop-word-filtered) vocabulary.
print vectorizer.get_feature_names()

# Vectorize the query text with the already-fitted vectorizer (sparse output).
txt_test = vectorizer.transform(['store data'])

#将训练集元素和测试集逐个对比
for i in range(0, len(txts)):
    #获取训练集第i个元素用于打印说明文本
    txt = txts[i]
    #获取建模矩阵的第i行
    txt_row = txt_train.getrow(i)
    #归一化，去除句子重复出现对统计结果的干扰
    txt_row_norm = txt_row/sp.linalg.norm(txt_row.toarray())
    txt_test_norm = txt_test/sp.linalg.norm(txt_test.toarray())
    #训练集和测试集归一化向量相减
    delta = txt_row_norm - txt_test_norm
    #调用scipy的两点间距离公式计算相似度
    distance = sp.linalg.norm(delta.toarray())
    print '===文本%i与测试样本的最小距离为%.2f: %s'%(i,distance,txt)


#K邻近聚类
print '='*15,'K-Clusters','='*15
km = KMeans(n_clusters=3,init='random',n_init=1,verbose=1)
km.fit(txt_train)
print '聚类结果向量 ' + str(km.labels_)
print '训练集大小   ' + str(km.labels_.shape[0])

#传入测试集（必须是列表类型，否则结果很奇葩）
K_txt = ['Medical Image Databases covers the new technologies of biomedical imaging databases and their applications in clinical services, education, and research. Authors were selected because they are doing cutting-edge basic or technology work in relevant areas. This was done to infuse each chapter with ideas from people actively investigating and developing medical image databases rather than simply review the existing literature. The authors have analyzed the literature and have expanded on their own research. They have also addressed several common threads within their generic topics.']
#向量化测试集
K_test = vectorizer.transform(K_txt)
#获取预测的簇标签
K_test_label = km.predict(K_test)[0]
#取出与预测簇标签一致的训练集元素
similar_indices = (km.labels_ == K_test_label).nonzero()[0]

print '预测新传入样本属于簇的标签为： ' + str(K_test_label)
print '训练集中该簇成员有： ' + str(similar_indices)

#遍历测试集与该簇中所有训练集元素的相似度并且打印最相似和最不相似的元素
similar = []
for i in similar_indices:
    #两点间距离求相似度
    distance = sp.linalg.norm((K_test - txt_train.getrow(i)).toarray())
    similar.append((distance, txts[i]))
#按相似度排序训练集中的样本
similar = sorted(similar)
print '与训练集最相似的训练集样本为 ' + str(similar[0][1]) + '\n相似度为 ' + str(similar[0][0])
print '.'*30
print '与训练集相似度中等的训练集样本为 ' + str(similar[int(len(similar)/2)][1])  + '\n相似度为 ' + str(similar[int(len(similar)/2)][0])
print '.'*30
print '与训练集最不相似的训练集样本为 ' + str(similar[-1][1])  + '\n相似度为 ' + str(similar[-1][0])