import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVR
import pandas_profiling
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import numpy as np
from scipy.sparse import csc_matrix
import pkuseg
import jieba

# Word-segmentation backend used by chinese_word_cut via `my_seg.cut(...)`.
# jieba is the active choice; pkuseg stays imported so it can be swapped in.
# BUG FIX: the pkuseg model was previously instantiated here and then
# immediately overwritten by the jieba assignment — a wasted, slow model load.
# my_seg = pkuseg.pkuseg()
my_seg = jieba


def get_custom_stopwords(stop_words_file):
    """Read a newline-delimited stop-word file and return the words as a list.

    Args:
        stop_words_file: path to a UTF-8 text file with one stop word per line.

    Returns:
        list[str]: the stop words, split on '\\n' (a trailing empty string
        appears if the file ends with a newline — same as the original).
    """
    # Explicit encoding: the stop-word list is Chinese text and must not
    # depend on the platform's default encoding.
    with open(stop_words_file, encoding='utf-8') as f:
        # The original also wrapped this in an identity list comprehension,
        # which added nothing; split() already returns a list.
        return f.read().split('\n')


# Harbin Institute of Technology (哈工大) Chinese stop-word list.
stop_words_file = 'data/哈工大停用词表.txt'
stopwords = get_custom_stopwords(stop_words_file)
# Bag-of-words vectorizer over the space-joined segmented text:
#   max_df=0.8    -- drop tokens appearing in more than 80% of documents
#   min_df=3      -- drop tokens appearing in fewer than 3 documents
#   token_pattern -- words of 2+ word-chars whose first char is not a digit
vect = CountVectorizer(max_df=0.8, min_df=3, token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b', stop_words=frozenset(stopwords))
# TF-IDF alternative kept from experimentation:
# vect = TfidfVectorizer(max_df=0.8, min_df=3, token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b', stop_words=frozenset(stopwords))


# Load the training texts and attach the sentiment labels from the separate
# label file (joined by row position/index — assumes both CSVs share the same
# row ordering; TODO confirm against the data files).
data = pd.read_csv('data/Train/Train_DataSet.csv', header=0)
data['sentiment'] = pd.read_csv('data/Train/Train_DataSet_Label.csv', header=0)['label']

# Mutable module-level state read/written by chinese_word_cut below:
#   rows -- index labels of rows whose title AND content are both missing
#   i    -- running call counter (diagnostic only)
# NOTE: the original `global i, rows` statement at module scope was a no-op
# (the `global` keyword only has effect inside a function) and was removed.
rows = []
i = 0


def chinese_word_cut(aalist):
    """Segment one DataFrame row's text into space-joined tokens.

    Prefers `content` (position 1); falls back to `title` (position 0) when
    content is missing. Rows with neither are recorded in the module-level
    `rows` list by their DataFrame index label so the caller can drop them.

    Args:
        aalist: a row Series with ['title', 'content'], as passed by
            DataFrame.apply(..., axis=1).

    Returns:
        str: space-separated segmented tokens, or '' when no text exists.
    """
    global i, rows
    i += 1  # kept as a running counter for backward compatibility
    # Positional access via .iloc: bare `aalist[1]` on a labeled Series is
    # ambiguous (label vs position) and deprecated in modern pandas.
    mytext = aalist.iloc[1]
    if pd.isna(mytext):
        mytext = aalist.iloc[0]
    if pd.isna(mytext):
        # BUG FIX: record the row's actual index label (aalist.name) instead
        # of the 1-based running counter, which was off by one relative to
        # the DataFrame's 0-based index (drop() removed the WRONG rows) and
        # kept counting when this function was reused on the test set.
        rows.append(aalist.name)
        return ''
    return " ".join(my_seg.cut(mytext))


# Segment every training row; rows with neither title nor content were
# recorded in the module-level `rows` list by chinese_word_cut.
data['cut_content'] = data.loc[:, ['title', 'content']].apply(chinese_word_cut, axis=1)
# NOTE(review): drop() removes rows by index LABEL — verify that the values
# collected in `rows` really are valid labels of `data`'s index (a 1-based
# counter against a 0-based RangeIndex would drop the wrong rows).
data.drop(rows, axis=0, inplace=True)


# ---- Vectorize the segmented text and build the dense feature frame ----
X = data['cut_content']
y = data.sentiment

X_vect = vect.fit_transform(X)
cols = vect.get_feature_names()
# Densify into a DataFrame, one column per vocabulary token.
pdX = pd.DataFrame(X_vect.toarray(), columns=cols)

# Per-token document frequency: fraction of documents containing the token.
doc_freq = (pdX > 0).mean()
# Frequency threshold currently disabled — every column is kept.
pd005 = doc_freq  # [doc_freq > 0.0005]
header = list(pd005.index)
X_sel_cols = pdX[header]



# Classifier choice; alternatives left commented out from experimentation.
model = MultinomialNB()
# model = DecisionTreeClassifier()
# model = SVR()
# model = GradientBoostingClassifier()

# 10-fold split, but only the FIRST fold is actually used (note the `break`):
# the model is fit on ~90% of the data and scored on the held-out ~10%.
kf = KFold(n_splits=10, shuffle=True, random_state=1)
for train_index, test_index in kf.split(X):
    # Fold indices are positions, hence .iloc on both features and labels.
    X_trn, y_trn = X_sel_cols.iloc[train_index, :], y.iloc[train_index]
    X_val, y_val = X_sel_cols.iloc[test_index, :], y.iloc[test_index]

    # Convert back to sparse before fitting to keep memory down.
    X_trn_sparse = csc_matrix(X_trn, dtype=np.float64)
    X_val_sparse = csc_matrix(X_val, dtype=np.float64)

    model.fit(X_trn_sparse, y_trn)
    trn_score = model.score(X_trn_sparse, y_trn)
    print(trn_score)

    val_score = model.score(X_val_sparse, y_val)
    print(val_score)
    break  # intentionally stop after the first fold


# ---- Score the test set and write the submission file ----
data_tst = pd.read_csv('data/Test_DataSet.csv', header=0)
# NOTE: chinese_word_cut mutates the module-level `rows`/`i` state here too,
# but test rows are never dropped — only the segmented text is used.
data_tst['cut_content'] = data_tst.loc[:, ['title', 'content']].apply(chinese_word_cut, axis=1)

tstX = data_tst['cut_content']
# transform (NOT fit_transform): reuse the vocabulary fitted on training data.
tstX_vec = vect.transform(tstX)
# Keep exactly the columns selected during training.
pdX_tst = pd.DataFrame(tstX_vec.toarray(), columns=cols).loc[:, header]
# BUG FIX (naming): this was stored in `X_trn_sparse`, silently shadowing the
# TRAINING matrix from the CV loop above — renamed to an honest name.
X_tst_sparse = csc_matrix(pdX_tst, dtype=np.float64)

nb_result = model.predict(X_tst_sparse)
# id + predicted label, aligned on the default RangeIndex of both operands.
res = pd.concat([data_tst['id'], pd.Series(nb_result)], axis=1)
res.columns = ['id', 'label']
res.to_csv('res/res-11.csv', header=True, index=False)

