#coding=utf-8
#---------------------------------------------------
#导入库
import lightgbm
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, BaggingRegressor, AdaBoostRegressor
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier, LogisticRegression, Lasso, Ridge
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR, SVC
from sklearn.tree import DecisionTreeRegressor, ExtraTreeRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score, KFold

# ---------------- Data exploration ----------------
# Load the train / test splits of the paper-abstract classification
# dataset (paths are relative to the current working directory).
DATA_DIR = './基于论文摘要的文本分类与查询性问答公开数据'

# Training split
train_df = pd.read_csv(DATA_DIR + '/train.csv', sep=',')
# Test split
test_df = pd.read_csv(DATA_DIR + '/test.csv', sep=',')

# Quick EDA: peek at the first rows of each frame.
# NOTE(review): in a plain script these return values are discarded —
# they only display output when run in a notebook / REPL.
train_df.head()

test_df.head()

# ---------------- Feature engineering ----------------

# Encode the target: pd.factorize maps each distinct topic string to an
# integer code and returns (codes, uniques). `lbl` keeps the uniques so
# integer predictions can be mapped back to topic names for the submission.
train_df['Topic(Label)'], lbl = pd.factorize(train_df['Topic(Label)'])


def _build_text_feature(df):
    """Add a lower-cased 'text' column combining title and abstract.

    Mutates *df* in place: whitespace-strips 'Title' and 'Abstract'
    (missing values become empty strings first) and joins them with a
    single space into a new 'text' column.
    """
    # fillna('') on BOTH columns: the original code only guarded
    # 'Abstract', so a missing 'Title' would crash on .strip().
    df['Title'] = df['Title'].fillna('').str.strip()
    df['Abstract'] = df['Abstract'].fillna('').str.strip()
    df['text'] = (df['Title'] + ' ' + df['Abstract']).str.lower()


# Same transformation for both splits (was previously duplicated inline).
_build_text_feature(train_df)
_build_text_feature(test_df)

# ---------------- TF-IDF text feature extraction ----------------
# Keep only the 2500 highest-frequency terms as the vocabulary.
tfidf = TfidfVectorizer(max_features=2500)

# TfidfVectorizer quick reference (translated notes):
# vocabulary_        : mapping from each feature term to its column index
#                      in the TF-IDF matrix.
# stop_words         : words to ignore; 'english' uses ENGLISH_STOP_WORDS,
#                      a list gives an explicit stop-word set.
# max_df             : drop terms whose document frequency exceeds this
#                      cap (float in 0~1 = proportion, int = count).
# get_feature_names(): returns the list of feature terms.
# fit                : learns the vocabulary and IDF weights from a corpus;
# transform          : converts documents into the TF-IDF matrix;
# fit_transform      : fit followed by transform in a single call.

# ---------------- Model training ----------------

# Vectorize the training text: learns vocabulary + IDF on the train split.
train_tfidf = tfidf.fit_transform(train_df['text'])

# Experiment log — 5-fold CV scores for each estimator tried.
# Regressors report R^2 (hugely negative => unusable on this
# classification task); classifiers report accuracy.
#
# KNeighborsRegressor(),[ -10.8179242    -2.9989138   -31.10348003  -70.85194985 -191.852968  ]
# SVC()[0.86458649 0.87155407 0.8748864  0.88518631 0.8730303 ]
# SVR(),[-102.87516411   -3.04749367   -2.34752566  -11.98562661  -74.0432269 ]
# Ridge(),[-98.80645946  -2.44907404  -3.14332054 -11.72723242 -69.95637588]
# Lasso(),[-97.82295764  -5.95652541  -0.2140551  -18.38107326 -81.32644713]
# MLPRegressor(alpha=20),[-93.64341829  -5.05367171  -0.18469931 -16.56860648 -82.1237361 ]
# DecisionTreeRegressor(),[-131.45981104   -5.14767398  -10.5717832   -13.18080362  -67.45116667]
# ExtraTreeRegressor(),[-124.0875777    -6.0363601   -11.29915799  -13.63240651  -71.26706667]
# XGBRegressor(),[-112.78395205   -3.19789198   -3.11362802   -9.6413668   -70.70294293]
# RandomForestRegressor(),
# AdaBoostRegressor(),[-108.37009541   -4.10403593   -0.25330078  -12.20183068  -72.09812511]
# GradientBoostingRegressor(),[-106.81400766   -3.4449038    -1.96963589  -12.59547784  -69.06403122]
# BaggingRegressor(),[-129.2295254    -4.56577457   -6.38044713   -7.27697431  -57.31081043]
# GaussianNB()[nan nan nan nan nan]
# LogisticRegression()[0.87609815 0.87761284 0.8709482  0.88609512 0.86969697]
#
# Other candidates kept for reference: SVC, SVR, GaussianNB,
# KNeighborsRegressor, Ridge, Lasso, MLPRegressor(alpha=20),
# ExtraTreeRegressor, AdaBoostRegressor, BaggingRegressor,
# LogisticRegression, DecisionTreeRegressor, XGBRegressor,
# RandomForestRegressor, GradientBoostingRegressor.

# SGDClassifier `loss` parameter (translated notes), default 'hinge':
#   'hinge'          : hinge loss -> linear SVM
#   'log'            : log loss -> logistic regression
#   'modified_huber' : blend of 'hinge' and 'log', combining their strengths
#   'squared_hinge'  : squared hinge loss -> linear SVM
#   'perceptron'     : perceptron loss
clf = SGDClassifier()

# cross_val_score yields one score per fold; used here to eyeball model
# quality (and pick hyper-parameters) before fitting the final model.
cvs = cross_val_score(clf, train_tfidf, train_df['Topic(Label)'], cv=5)
print(cvs)


# Vectorize the test text with the vocabulary learned on the train split.
test_tfidf = tfidf.transform(test_df['text'])

# Fit the final classifier on the full training set and predict topics.
clf = SGDClassifier()
clf.fit(train_tfidf, train_df['Topic(Label)'])
test_df['Topic(Label)'] = clf.predict(test_tfidf)

# fasttext alternative (not used):
# model = fasttext.train_supervised(train_df['text'])
# test_df['Topic(Label)'] = model.predict(test_df['text'])

# ---------------- Write the submission ----------------
print(test_df['Topic(Label)'])
# Map the integer codes back to the original topic strings via `lbl`
# (the uniques array returned by pd.factorize at training time).
test_df['Topic(Label)'] = test_df['Topic(Label)'].map(lambda code: lbl[code])
test_df[['Topic(Label)']].to_csv('submit-tfidf-KFold.csv', index=None)