# Text feature extraction with CountVectorizer

## Imports
from sklearn.feature_extraction.text import CountVectorizer

import jieba

def textvec():
    """Demonstrate bag-of-words feature extraction on English sentences.

    Fits a CountVectorizer on two short sentences, then prints the sparse
    count matrix, its dense array form, and the learned vocabulary.

    :return: None
    """
    cv = CountVectorizer()
    data = cv.fit_transform(["life is short", "life is long"])
    # Sparse (coordinate) form, e.g.:
    #   (0, 3)  1   -> document 0 contains vocabulary word 3 ("short") once
    print(data)
    # Dense matrix: one row per document, one column per vocabulary word,
    # each cell holding that word's occurrence count, e.g.:
    #   [[1 1 0 1]
    #    [1 1 1 0]]
    print(data.toarray())  # convert the sparse matrix to a dense array
    # Vocabulary learned across all documents (duplicates counted once):
    #   ['is', 'life', 'long', 'short']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    print(cv.get_feature_names_out())
    return None


## Chinese text is not whitespace-delimited, so CountVectorizer cannot split
## it by default; segment the text into words first, then extract features.
def textvecchinese():
    """Demonstrate feature extraction on pre-segmented Chinese sentences.

    CountVectorizer splits on whitespace, so the Chinese input must already
    contain spaces between words; this example segments it by hand.

    :return: None
    """
    cv = CountVectorizer()
    # Without spaces each whole sentence would become a single "word",
    # so the input is manually space-separated.
    data = cv.fit_transform(["生活 美好，喝杯 奶茶", "生活 美好，晒晒 太阳"])
    # Sparse (coordinate) form, e.g.:
    #   (0, 2)  1   -> document 0 contains vocabulary word 2 once
    print(data)
    # Dense matrix: one row per document, one column per vocabulary word:
    #   [[1 0 1 0 1 1]
    #    [0 1 0 1 1 1]]
    print(data.toarray())  # convert the sparse matrix to a dense array
    # Vocabulary learned from the segmented input:
    #   ['喝杯', '太阳', '奶茶', '晒晒', '生活', '美好']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    print(cv.get_feature_names_out())
    return None

def cutword():
    """Segment three Chinese sentences with jieba.

    Each sentence is tokenized and its tokens are joined back into a single
    space-delimited string, the format CountVectorizer expects.

    :return: tuple of three space-delimited token strings
    """
    sentences = (
        "我们总是担心未来，却忘记了做好当下",
        "百分之百的努力换取百分之百的人生",
        "生活很美好，让我们都努力一点吧",
    )
    # jieba.cut yields tokens lazily; join them directly with spaces.
    con1, con2, con3 = (' '.join(jieba.cut(s)) for s in sentences)
    return con1, con2, con3

def hanzivec():
    """Vectorize jieba-segmented Chinese text with CountVectorizer.

    Uses cutword() to produce space-delimited token strings, fits a
    CountVectorizer on them, and prints the vocabulary and count matrix.

    :return: None
    """
    cv = CountVectorizer()
    # cutword() returns three sentences already segmented by jieba.
    c1, c2, c3 = cutword()
    data = cv.fit_transform([c1, c2, c3])
    # Vocabulary learned from the three segmented sentences, e.g.:
    #   ['一点', '人生', '做好', '努力', '当下', '忘记', '总是', '我们',
    #    '担心', '换取', '未来', '生活', '百分之百', '美好']
    # NOTE: get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the supported replacement.
    print(cv.get_feature_names_out())
    # Dense matrix: one row per sentence, one column per vocabulary word,
    # each cell holding that word's occurrence count, e.g.:
    #   [[0 0 1 0 1 1 1 1 1 0 1 0 0 0]
    #    [0 1 0 1 0 0 0 0 0 1 0 0 2 0]
    #    [1 0 0 1 0 0 0 1 0 0 0 1 0 1]]
    print(data.toarray())
    return None

if __name__ == '__main__':
    # Other demos available: textvec(), textvecchinese()
    hanzivec()