import pandas as pd, numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm
import pickle

# Load the training set in chunks, keeping only the document text and the
# label columns (indices 2 and 3 -> 'word_seg' and 'class').
# Pass the path straight to pandas: it manages the file handle itself and
# decodes as UTF-8 by default, instead of the locale-dependent encoding
# that a bare open() would use.
word = []      # segmented-word documents, one string per sample
classify = []  # class labels, aligned index-for-index with `word`
for chunk in pd.read_csv('../train_data_1000.csv', usecols=[2, 3], chunksize=1000):
    word.extend(chunk['word_seg'])
    classify.extend(chunk['class'])
print(type(classify))

# Build word/bigram TF-IDF features from the training documents.
# Sub-linear TF scaling with smoothed IDF; terms appearing in fewer than
# 3 documents or in more than 90% of documents are dropped.
vec = TfidfVectorizer(
    ngram_range=(1, 2),
    min_df=3,
    max_df=0.9,
    use_idf=True,
    smooth_idf=True,
    sublinear_tf=True,
)
trn_term_doc = vec.fit_transform(word)
# Load the test set in chunks: column 0 = sample 'id', column 2 = 'word_seg'.
# As with the training data, give pandas the path directly so it owns the
# file handle and uses its default UTF-8 decoding rather than the locale
# encoding of a bare open().
test_id = []    # sample ids, aligned index-for-index with `test_word`
test_word = []  # segmented-word test documents
for chunk in pd.read_csv('../test_data_1000.csv', usecols=[0, 2], chunksize=100):
    test_id.extend(chunk['id'])
    test_word.extend(chunk['word_seg'])
# Project the test documents onto the vocabulary/IDF learned from training.
test_term_doc = vec.transform(test_word)
# Persist everything downstream consumers need (raw texts, labels, ids,
# and both sparse TF-IDF matrices). Use a context manager so the handle
# is flushed and closed deterministically — the original inline open()
# was never closed.
with open('./train_tf_idf.pkl', 'wb') as f:
    pickle.dump([word, classify, test_word, test_id, trn_term_doc, test_term_doc], f)


