# from sklearn.feature_extraction.text import TfidfVectorizer
#
#
# corpus = [
#     "I am not your father",
#     "我 是 你 爸爸"
# ]
# tf_idf_vectorizer = TfidfVectorizer()
# X = tf_idf_vectorizer.fit_transform(corpus)
# print(X)
# print(X.toarray())
# print(tf_idf_vectorizer.get_feature_names())
#
# print('*' * 50)
# Y = tf_idf_vectorizer.transform(corpus)
# print(Y.toarray())
# print(tf_idf_vectorizer.get_feature_names())
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import logging


# Raise jieba's log level to INFO so its DEBUG-level startup messages
# (dictionary/model loading chatter) are suppressed.
jieba.setLogLevel(logging.INFO)


def cut_word(text):
    """Segment a Chinese string into space-separated tokens with jieba.

    Example: "我爱北京天安门" ----> "我 爱 北京 天安门"

    :param text: raw Chinese string
    :return: the segmented string, tokens joined by single spaces
    """
    # jieba.cut returns a generator; str.join consumes any iterable
    # directly, so the original intermediate list() was unnecessary.
    return " ".join(jieba.cut(text))


def text_chinese_tfidf_demo():
    """Extract TF-IDF features from a small Chinese corpus.

    Each sentence is first segmented with jieba (TfidfVectorizer splits
    on whitespace/punctuation and cannot segment Chinese on its own),
    then vectorized, and the resulting matrix and vocabulary are printed.

    :return: None
    """
    data = ["一种还是一种今天很残酷。",
            "我们看到的东西。",
            "如果只用一种方式了解某样事物。"]
    # Convert the raw sentences into whitespace-separated token strings.
    text_list = [cut_word(sent) for sent in data]
    print(text_list)

    # 1. Instantiate the transformer; stop_words removes these tokens
    #    from the vocabulary.
    transfer = TfidfVectorizer(stop_words=['一种', '不会', '不要'])
    # 2. Fit the vocabulary and transform the corpus in one step.
    data = transfer.fit_transform(text_list)
    print("文本特征抽取的结果：\n", data.toarray())
    # get_feature_names() was deprecated in scikit-learn 1.0 and removed
    # in 1.2; prefer get_feature_names_out(), falling back for old versions.
    if hasattr(transfer, 'get_feature_names_out'):
        feature_names = transfer.get_feature_names_out()
    else:
        feature_names = transfer.get_feature_names()
    print("返回特征名字：\n", feature_names)

    return None


# Run the demo only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == '__main__':
    text_chinese_tfidf_demo()
