## Text feature extraction: TF-IDF term weighting for Chinese text
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba

def cutword(sentences=None):
    """Segment Chinese sentences with jieba and space-join the tokens.

    sklearn's vectorizers split on whitespace, so each sentence is
    returned as a single string of space-separated tokens.

    :param sentences: optional iterable of sentences to segment;
        defaults to the three built-in demo sentences (backward
        compatible with the original no-argument call).
    :return: tuple of space-joined token strings, one per sentence.
    """
    if sentences is None:
        sentences = (
            "我们总是担心未来，却忘记了做好当下",
            "百分之百的努力换取百分之百的人生",
            "生活很美好，让我们都努力一点吧",
        )
    # jieba.cut yields a token generator; join it directly instead of
    # materializing an intermediate list for each sentence.
    return tuple(' '.join(jieba.cut(s)) for s in sentences)

def tfidfVec():
    """Compute TF-IDF features for the demo Chinese sentences.

    Segments the text via cutword(), fits a TfidfVectorizer on the
    three resulting documents, then prints the learned vocabulary
    followed by the TF-IDF weight matrix.

    :return: None — results are printed to stdout.
    """
    cv = TfidfVectorizer()
    c1, c2, c3 = cutword()
    data = cv.fit_transform([c1, c2, c3])
    # get_feature_names() was deprecated in scikit-learn 1.0 and removed
    # in 1.2; get_feature_names_out() is the supported replacement.
    print(cv.get_feature_names_out())
    print(data.toarray())

if __name__ == '__main__':
    # Script entry point: run the TF-IDF demo.
    tfidfVec()