# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

def load_file(random_state=0):
    """Load the spam dataset, TF-IDF-vectorize the messages, and return a
    50/50 train/test split.

    Parameters
    ----------
    random_state : int or None, default 0
        Seed passed to ``train_test_split``; the default 0 reproduces the
        original deterministic split.

    Returns
    -------
    x_train, x_test : numpy.ndarray
        Dense TF-IDF feature matrices.
    y_train, y_test : pandas.Series
        Integer label codes (from ``factorize`` on column ``v1``).
    """
    # Dataset ships as latin-1, not utf-8.
    df = pd.read_csv("../data/spam.csv", encoding="latin1")
    # v1 = label text (ham/spam), v2 = message body.
    df = df[['v1', 'v2']]
    # Encode string labels as integer codes.
    df['type_id'] = df['v1'].factorize()[0]

    # TfidfVectorizer is equivalent to CountVectorizer followed by
    # TfidfTransformer, in a single step.
    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(df.v2)

    # NOTE(review): toarray() densifies the sparse matrix — fine for this
    # small dataset, memory-heavy for large corpora.
    features = tfidf.toarray()
    labels = df.type_id  # integer label codes

    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.5, random_state=random_state)

    return x_train, x_test, y_train, y_test

def load_file_test(random_state=None):
    """Load the spam dataset, TF-IDF-vectorize the messages, and return a
    50/50 train/test split with a (by default) random, non-seeded shuffle.

    Parameters
    ----------
    random_state : int or None, default None
        Seed passed to ``train_test_split``; the default None reproduces
        the original non-deterministic split.

    Returns
    -------
    x_train, x_test : numpy.ndarray
        Dense TF-IDF feature matrices.
    y_train, y_test : pandas.Series
        Integer label codes (from ``factorize`` on column ``v1``).
    """
    # Dataset ships as latin-1, not utf-8.
    df = pd.read_csv("../data/spam.csv", encoding="latin1")
    # v1 = label text (ham/spam), v2 = message body.
    df = df[['v1', 'v2']]
    # Encode string labels as integer codes.
    df['type_id'] = df['v1'].factorize()[0]

    # TfidfVectorizer is equivalent to CountVectorizer followed by
    # TfidfTransformer, in a single step.
    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(df.v2)

    # NOTE(review): toarray() densifies the sparse matrix — fine for this
    # small dataset, memory-heavy for large corpora.
    features = tfidf.toarray()
    labels = df.type_id  # integer label codes

    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.5, random_state=random_state)

    return x_train, x_test, y_train, y_test