# -*- coding: utf-8 -*-
"""
以Yelp 点评数据集为例说明词袋模型
"""

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

# Load the training set: a TSV file with one "<label>\t<review text>" pair
# per line and no header row.
filename = 'train.txt'
train_df = pd.read_csv(filename, sep='\t', header=None, names=['target', 'text'])

# Notebook-style inspection of the loaded frame; these bare expressions have
# no visible effect when the file is run as a plain script.
train_df.head()
train_df.shape

# 1-gram
#from sklearn.externals import joblib
# Unigram (1-gram) bag-of-words model.
# fit and transform are kept as separate steps so the fitted vectorizer can
# be persisted (e.g. with joblib) and reused on new data later.
bow_converter = CountVectorizer()
# BUG FIX: the dataframe created above only has columns ['target', 'text'];
# the original code referenced a nonexistent 'text_words' column (KeyError).
bow_converter = bow_converter.fit(train_df['text'])
bow_converter.vocabulary_       # peek at the learned vocabulary (term -> column index)
len(bow_converter.vocabulary_)  # vocabulary size
# joblib.dump(bow_converter, 'bow_converter')  # persist the fitted bag-of-words model

train_x = bow_converter.transform(train_df['text'])  # sparse document-term count matrix
# train_x.toarray()  # densify the sparse matrix (memory-heavy for large corpora)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2; on modern
# versions use get_feature_names_out() instead — confirm the installed version.
words = bow_converter.get_feature_names()
len(words)

# 2-gram
# Bigram model: count adjacent word pairs. The custom token_pattern keeps
# single-character tokens that the default pattern would otherwise drop.
bigram_converter = CountVectorizer(token_pattern='(?u)\\b\\w+\\b', ngram_range=(2, 2))
bigram_converter.fit(train_df['text'])
x2 = bigram_converter.transform(train_df['text'])
bigrams = bigram_converter.get_feature_names()  # list of learned bigram features
len(bigrams)

# 3-gram
# Trigram model: count runs of three consecutive words, using the same
# permissive token_pattern as the bigram model above.
trigram_converter = CountVectorizer(token_pattern='(?u)\\b\\w+\\b', ngram_range=(3, 3))
trigram_converter.fit(train_df['text'])
x3 = trigram_converter.transform(train_df['text'])
trigrams = trigram_converter.get_feature_names()  # list of learned trigram features
len(trigrams)






