import sklearn.feature_extraction.text
from sklearn import feature_extraction
import jieba

"""
第一部分为字典特征提取

if __name__ == "__main__":
    # 字典提取方法为DictVectorizer
    dict_list = [{"city": "beijing",
                  "temperature": 100},
                 {"city": "shanghai",
                  "temperature": 70},
                 {"city": "guangzhou",
                  "temperature": 40}]
    # 传入字典或字典的迭代器， 默认返回值为sparse矩阵(稀疏矩阵, 其作用是只显示非零数值，节省内存)
    dictvectorizer = feature_extraction.DictVectorizer(sparse=False)
    new_dict = dictvectorizer.fit_transform(dict_list)
    print("特征矩阵: \n", new_dict)
    print("特征名字: \n", dictvectorizer.get_feature_names_out(new_dict))
    old_dict = dictvectorizer.inverse_transform(new_dict)
    print("原字典: \n", old_dict)
    old2_dict = dictvectorizer.transform(old_dict)
    print("原字典(再次转换): \n", old2_dict)
"""
"""
第二部分为文本特征提取
1
if __name__ == "__main__":
    text1 = ["To be or not to be", "that is the question"]
    countvectorizer = feature_extraction.text.CountVectorizer()  # 参数为停用词列表， 可以将无需统计的单词进行忽略
    text1_feature = countvectorizer.fit_transform(text1)
    print(text1_feature)  # 返回稀疏特征矩阵
    print(text1_feature.toarray())
    text1_old = countvectorizer.inverse_transform(text1_feature)
    print(text1_old)  # 返回稀疏矩阵转化前
    text1_feature_name = countvectorizer.get_feature_names_out(text1)
    print(text1_feature_name)  # 返回特征值对应的单词.英文文本特征部分
2.中文文本特征部分
def cut_word(text):
    进行中文分词
    :param text:文本
    :return:分词后的文本
    text_change = list(jieba.cut(text))
    return text_change


if __name__ == "__main__":
    text = "镜中月是天上月，眼前人是心上人"
    text_new = [" ".join(cut_word(text))]
    countvectorizer = feature_extraction.text.CountVectorizer()  # 参数为停用词列表， 可以将无需统计的单词进行忽略
    text1_feature = countvectorizer.fit_transform(text_new)
    print(text1_feature)  # 返回稀疏特征矩阵
    print(text1_feature.toarray())
    text1_old = countvectorizer.inverse_transform(text1_feature)
    print(text1_old)  # 返回稀疏矩阵转化前
    text1_feature_name = countvectorizer.get_feature_names_out(text_new)
    print(text1_feature_name)  # 返回特征值对应的单词.中文文本特征部分
"""
"""
TF——idf文本特征提取词频逆向文档频率
即在一篇文章中出现次数较多而在其他文章中出现少的词， 即关键词
"""
def cut_word(text):
    """Segment a Chinese text string into individual words with jieba.

    :param text: raw Chinese text to segment
    :return: list of word tokens produced by jieba
    """
    # jieba.cut yields tokens lazily; materialize the generator into a list.
    return [word for word in jieba.cut(text)]


if __name__ == "__main__":
    text = "镜中月是天上月，眼前人是心上人"
    text_new = [" ".join(cut_word(text))]
    countvectorizer = feature_extraction.text.TfidfVectorizer()  # 参数为停用词列表， 可以将无需统计的单词进行忽略
    text1_feature = countvectorizer.fit_transform(text_new)
    print(text1_feature)  # 返回稀疏特征矩阵
    print(text1_feature.toarray())
    text1_old = countvectorizer.inverse_transform(text1_feature)
    print(text1_old)  # 返回稀疏矩阵转化前
    text1_feature_name = countvectorizer.get_feature_names_out(text_new)
    print(text1_feature_name)  # 返回特征值对应的单词.中文文本特征部分




