#-*- coding: UTF-8 -*-
__author__ = 'Jinkey'
from sklearn.feature_extraction.text import CountVectorizer
import jieba
import jieba.analyse
import scipy.sparse as spa
import re


# NOTE(review): instantiated but never used below; the sklearn class name is
# also shadowed by the local CountVectorizer() function defined later in this
# file — consider removing this line or renaming that function.
Vectorizer = CountVectorizer(min_df=1)
# print Vectorizer
# Two sample Chinese sentences used as the demo corpus for the code below.
contents = ['跟猪猪出去玩耍了，我很开心，猪猪也很开心','我来到北京清华大学']

# Word-frequency counting helper.
def freq(contents):
    """Count word occurrences across a list of sentences.

    Each sentence is stripped of English letters and Chinese punctuation,
    segmented with jieba (accurate mode), and the resulting words are
    tallied into one combined frequency dict.

    :param contents: iterable of sentences (bytes or unicode strings)
    :return: dict mapping segmented word -> total count over all sentences
    """
    features = {}
    for content in contents:
        # Normalize to unicode first. The original unconditionally called
        # .decode("u8"), which raises if the text is already decoded.
        if isinstance(content, bytes):
            content = content.decode("utf-8")
        # Remove English letters — both cases; the original pattern
        # "[a-z]" silently missed uppercase letters.
        content_clear = re.sub(u"[a-zA-Z]", u"", content)
        # Remove Chinese punctuation (comma and full stop).
        content_clear = re.sub(u"[，。]", u"", content_clear)
        # Segment with jieba in accurate (non-full) mode and tally.
        for cut in jieba.cut(content_clear, cut_all=False):
            features[cut] = features.get(cut, 0) + 1
    return features


def get_feature_names():
    """Return the vocabulary of the module-level *contents*.

    The words come back in the iteration order of the frequency dict
    built by freq(contents) — the same order CountVectorizer() uses for
    its matrix columns.

    :return: list of unique segmented words
    """
    # Iterating a dict yields its keys, so the manual append loop of the
    # original reduces to a single list() call.
    return list(freq(contents))


def CountVectorizer(contents):
    """Build a dense document-term count matrix as a sparse COO matrix.

    Row i holds the per-word counts for contents[i]; column order is the
    iteration order of freq(contents), matching get_feature_names().

    Bug fixed: the original zeroed one shared counts dict only once before
    the loop, so a word counted in document i kept its count in row i+1
    whenever document i+1 did not contain it (stale carry-over).

    NOTE(review): this function shadows sklearn's CountVectorizer class
    imported at the top of the file — consider renaming it (callers such
    as the script footer would need updating too).

    :param contents: iterable of sentences (bytes or unicode strings)
    :return: scipy.sparse.coo_matrix of shape (len(contents), vocab size)
    """
    # Fixed vocabulary/column order shared by every row.
    vocabulary = list(freq(contents))
    rows = []
    for content in contents:
        doc_counts = freq([content])
        # Words absent from this document get 0 — no leakage from the
        # previous document's counts.
        rows.append([doc_counts.get(word, 0) for word in vocabulary])
    return spa.coo_matrix(rows)

# Get the word-frequency (document-term) matrix for the sample corpus
# and print it, followed by the vocabulary (one word per line, in the
# matrix's column order).
CV = CountVectorizer(contents)
print CV.toarray()
for each in get_feature_names():
    print each