from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba

def count_demo():
    """Demonstrate bag-of-words feature extraction on two English sentences.

    Fits a CountVectorizer (with 'is' and 'too' as stop words) and prints
    the learned vocabulary followed by the dense count matrix.
    """
    data = ['life is short,i like python', 'life is too long,i dislike python']
    transfer = CountVectorizer(stop_words=['is', 'too'])
    data_new = transfer.fit_transform(data)
    # Feature names, e.g. ['dislike', 'life', 'like', 'long', 'python', 'short']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    feature = transfer.get_feature_names_out()
    print(feature)
    print(data_new.toarray())

# 返回的是单词的重要程度
def tfidf_demo():
    """Demonstrate TF-IDF feature extraction on two English sentences.

    TF-IDF weights reflect how important a word is to a document within
    the corpus. Prints the vocabulary and the dense weight matrix.
    """
    data = ['life is short,i like python', 'life is too long,i dislike python']
    transfer = TfidfVectorizer(stop_words=['is', 'too'])
    data_new = transfer.fit_transform(data)
    # Feature names, e.g. ['dislike', 'life', 'like', 'long', 'python', 'short']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    feature = transfer.get_feature_names_out()
    print(feature)
    print(data_new.toarray())
# 结巴分词
def cut_word(text):
    """Tokenize Chinese *text* with jieba and return a space-joined string."""
    return ' '.join(jieba.cut(text))
def count_chinese_demo():
    """Demonstrate bag-of-words extraction on Chinese text.

    CountVectorizer splits on whitespace, so each sentence is first
    segmented with jieba (via cut_word) into space-separated tokens.
    Prints the vocabulary and the dense count matrix.
    """
    data = ['我爱北京天安门', '天安门上太阳升']
    # Pre-segment each sentence so the vectorizer can tokenize it.
    data_cut = [cut_word(sent) for sent in data]
    transfer = CountVectorizer()
    data_new = transfer.fit_transform(data_cut)
    # Feature names, e.g. ['北京', '天安门', '太阳升']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    feature = transfer.get_feature_names_out()
    print(feature)
    print(data_new.toarray())

if __name__ == '__main__':
    # Only the TF-IDF demo runs by default; count_demo() and
    # count_chinese_demo() are available but not invoked here.
    tfidf_demo()
