import pandas as pd
import jieba
from sklearn.feature_extraction.text import CountVectorizer
from snownlp import SnowNLP
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import pandas_profiling



# Training text (id/title/content) and its sentiment labels live in two
# parallel CSV files; attach the label column onto the text frame.
# Run from the qinggan/ directory so the relative data/ paths resolve.
# NOTE(review): assumes the label file is row-aligned with the data file — confirm.
data = pd.read_csv('data/Train/Train_DataSet.csv', header=0)
labels = pd.read_csv('data/Train/Train_DataSet_Label.csv', header=0)
data['sentiment'] = labels['label']

# Shared scratch state for chinese_word_cut: `i` counts rows seen so far,
# `rows` collects the 0-based positions of rows whose title AND content are
# both missing so the caller can drop them afterwards.  Both are reset before
# re-use on the test set further down the file.
rows = []
i = 0


def chinese_word_cut(aalist):
    """Tokenise one row's text with jieba, returning space-joined tokens.

    aalist is a (title, content) pair.  The content is preferred; when it is
    missing the title is used instead.  When both are missing, the row's
    0-based position is appended to the global `rows` and '' is returned.
    """
    global i, rows
    mytext = aalist[1]  # content field
    i += 1
    if pd.isna(mytext):  # content missing -> fall back to title
        mytext = aalist[0]
    if pd.isna(mytext):
        # BUG FIX: the original appended `i` *after* the increment, so
        # data.drop(rows) removed the row following the empty one.  Record
        # the current row's 0-based index instead.
        # (pd.isna also replaces the fragile str(x) == 'nan' check, which
        # missed None and collided with a literal 'nan' string.)
        rows.append(i - 1)
        return ''
    return " ".join(jieba.cut(mytext))

# Tokenise every row (content, falling back to title) and drop the rows whose
# positions chinese_word_cut recorded in `rows` (both fields missing).
data['cut_content'] = data[['title', 'content']].apply(chinese_word_cut, axis=1)
data.drop(rows, axis=0, inplace=True)

# Hold out 1% of the data as a quick sanity-check evaluation split.
X = data['cut_content']
y = data['sentiment']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01, random_state=22)


def get_custom_stopwords(stop_words_file):
    """Load a stop-word file (one word per line) and return the words as a list.

    The file is read as UTF-8 explicitly: the previous code used the
    platform-default encoding, which breaks the Chinese stop-word list on
    e.g. Windows (cp936/GBK locales).
    NOTE(review): assumes the stop-word file itself is UTF-8 — confirm.
    """
    with open(stop_words_file, encoding='utf-8') as f:
        # split('\n') (not splitlines) keeps the original behaviour of
        # yielding a final '' element when the file ends with a newline;
        # the pointless [i for i in ...] identity copy is dropped.
        return f.read().split('\n')


# Stop words: Harbin Institute of Technology (HIT) Chinese stop-word table.
stop_words_file = 'data/哈工大停用词表.txt'
stopwords = get_custom_stopwords(stop_words_file)

# Bag-of-words features: tokens of 2+ word characters not starting with a
# digit, occurring in at least 3 documents and at most 80% of them.
vect = CountVectorizer(
    max_df=0.8,
    min_df=3,
    token_pattern=r'(?u)\b[^\d\W]\w+\b',
    stop_words=frozenset(stopwords),
)

# Earlier experiments (kept for reference):
# model = MultinomialNB()
# model = DecisionTreeClassifier()
# NOTE(review): SVR is a regressor, yet the labels come from a 'label' column
# and are written out as class labels below — confirm a regressor is intended.
model = SVR()

# Fit on the vectorised training split; print the model's score on both the
# training split and the 1% held-out split.
X_train_vect = vect.fit_transform(X_train)
model.fit(X_train_vect, y_train)
print(model.score(X_train_vect, y_train))

X_test_vect = vect.transform(X_test)
print(model.score(X_test_vect, y_test))

# Predict over the full training set and keep the raw predictions alongside.
X_vec = vect.transform(X)
nb_result = model.predict(X_vec)
data['nb_result'] = nb_result


# Score the competition test set with the same pipeline and write the
# submission file.
data_tst = pd.read_csv('data/Test_DataSet.csv', header=0)
# Reset the shared counters used by chinese_word_cut.  Unlike training,
# text-less rows are recorded but never dropped: every id needs a label.
rows = []
i = 0
data_tst['cut_content'] = data_tst.loc[:, ['title', 'content']].apply(chinese_word_cut, axis=1)
tstX_vec = vect.transform(data_tst['cut_content'])
nb_result = model.predict(tstX_vec)

res = pd.concat([data_tst['id'], pd.Series(nb_result)], axis=1)
res.columns = ['id', 'label']
res.to_csv('res/res-4.csv', header=True, index=False)




# Exploratory analysis of the training vocabulary.
try:
    # sklearn >= 1.0 spelling; get_feature_names() was removed in sklearn 1.2.
    cols = vect.get_feature_names_out()
except AttributeError:
    cols = vect.get_feature_names()
arr_X = X_train_vect.toarray()
pdX = pd.DataFrame(arr_X, columns=cols)
# profile = pdX.profile_report(title="Census Dataset")
# profile.to_file(output_file="./res/census_report.html")

# Document frequency of each term: fraction of documents containing it.
pdsum = pdX.apply(lambda x: sum(x > 0) / x.shape[0])
import matplotlib.pyplot as plt
plt.plot(range(len(pdsum)), pdsum)
plt.show()

# Terms appearing in more than 5% of documents (exploratory; unused below).
pd005 = pdsum[pdsum > 0.05]

# Per-document counts for the sample term '进行'; the original left the
# filtered expression dangling (a no-op), so name the result explicitly.
pdX1 = pdX.loc[:, '进行']
pdX1_nonzero = pdX1[pdX1 > 0]