import pandas as pd  # 导入pandas库对数据进行读取
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
from sklearn.metrics import roc_auc_score

from sklearn.preprocessing import LabelEncoder, LabelBinarizer, StandardScaler  # 导入标签编码器
from sklearn.model_selection import train_test_split, GridSearchCV  # 导入数据划分器
from sklearn.linear_model import LogisticRegression  # 导入逻辑回归模型做分类
from sklearn.ensemble import RandomForestClassifier
import joblib  # 导入模型保存模块

from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier

train_file = "D:/code/datasets/train_v2_drcat_02/train_v2_drcat_02.csv"
# Load the training data (only the text and label columns are needed).
df = pd.read_csv(train_file, usecols=['text', 'label'])
# val_df = pd.read_csv('val.csv', usecols=['text', 'label'])
"""
print(df.info())
 0   text    44868 non-null  object
 1   label   44868 non-null  int64
"""

# Encode the labels as integers.
# NOTE(review): per the df.info() output above 'label' is already int64,
# so this LabelEncoder pass is likely a no-op remapping — confirm before removing.
label_encoder = LabelEncoder()
df['label'] = label_encoder.fit_transform(df['label'])
print(df)

# label_encoder = LabelBinarizer() # instantiate the binarizer that converts labels to one-hot
# df['label'] = label_encoder.fit_transform(df['label']) # transform the label column and assign it back

# Vectorize the text with TF-IDF (fit_transform returns a sparse document-term matrix).
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(df['text'])
print(type(X),"#######################")
y = df['label']

# NOTE(review): no random_state/stratify is given, so the split differs on every run.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# print(X_train.shape,X_test.shape,y_train.shape,y_test.shape)

# Train a logistic-regression baseline (sparse input is supported natively).
logic_clf = LogisticRegression(max_iter=1000)
logic_clf.fit(X_train, y_train)

# print(model.predict(X_test)) # predict on the held-out split
# print(model.score(X_test,y_test)) # evaluate the model (mean accuracy)


# def catboost_GridSearch(x,y):
#     '''
#     Grid-search the best CatBoostClassifier hyper-parameters.
#     parameters:
#     x: features
#     y: labels
#     '''
#     # candidate hyper-parameter grid
#     Para_grid={'iterations':[300,400,500,600,700],'learning_rate':[0.005,0.01,0.03,0.05],\
#         'depth':[5,6,7,8]}
#     # instantiate the model
#     model=CatBoostClassifier()
#     # grid search with 3-fold cross-validation
#     grid_search=GridSearchCV(model,Para_grid,cv=3)
#     # fit on the training data
#     grid_search.fit(x,y)
#     return(grid_search.best_params_,grid_search.best_estimator_)
#
# catboost_GridSearch(X_train,y_train) # the CatBoost hyper-parameters used below came from this search

# xgb_clf = XGBClassifier()
# lgb_clf = LGBMClassifier()
# # the cross-validation approach above could be reused here; omitted
# cat_clf = CatBoostClassifier(**{'depth': 8, 'iterations': 300, 'learning_rate': 0.03})  # parameters from the grid search above
#
# xgb_clf.fit(X_train, y_train)
# lgb_clf.fit(X_train, y_train)
# cat_clf.fit(X_train, y_train)
# Compare the accuracy of the three boosted-tree models:
# print(xgb_clf.score(X_test,y_test),lgb_clf.score(X_test,y_test),cat_clf.score(X_test,y_test))

# Evaluate on the held-out split, model 1: logistic regression.
# predict_proba(...)[:, 1] is the predicted probability of the positive class.
y_pred = logic_clf.predict_proba(X_test)[:, 1]
print(y_pred)
# Compute ROC AUC against the true held-out labels.
auc = roc_auc_score(y_test, y_pred)
print(f'logic AUC {auc}')

# # Evaluate model 2: XGBoost
# y_pred = xgb_clf.predict_proba(X_test)[:, 1]
# # compute AUC
# auc = roc_auc_score(y_test, y_pred)
# print(f'xgb_clf AUC {auc}')
#
# # Evaluate model 3: LightGBM
# y_pred = lgb_clf.predict_proba(X_test)[:, 1]
# # compute AUC
# auc = roc_auc_score(y_test, y_pred)
# print(f'lgb_clf AUC {auc}')
#
# # Evaluate model 4: CatBoost
# y_pred = cat_clf.predict_proba(X_test)[:, 1]
# # compute AUC
# auc = roc_auc_score(y_test, y_pred)
# print(f'cat_clf AUC {auc}')
#
# # Persist the trained model(s) with joblib.
# NOTE(review): the fitted TfidfVectorizer is NOT persisted here; without it the
# saved model cannot be applied to new text in a separate process — consider
# joblib.dump(vectorizer, ...) alongside the model.
joblib.dump(logic_clf, str(Path("D:/code/models") / "logist_regression_model1.pkl"))
# joblib.dump(xgb_clf, str(Path("D:/code/models") / "xgb_clf.pkl"))
# joblib.dump(lgb_clf, str(Path("D:/code/models") / "lgb_clf.pkl"))
# joblib.dump(cat_clf, str(Path("D:/code/models") / "cat_clf.pkl"))

"""
# logic AUC 0.9988795464437388
xgb_clf AUC 0.9992873539811155
lgb_clf AUC 0.9991473494492252
cat_clf AUC 0.9984312606959498
"""

val_file = "D:/code/datasets/train_v2_drcat_02/val.csv"
# Load the validation texts (no labels are available here).
df = pd.read_csv(val_file, usecols=['text'])

# BUG FIX: transform the validation texts with the SAME TfidfVectorizer that was
# fitted on the training data. The original code fitted a brand-new vectorizer on
# the validation set, which builds a different vocabulary / feature space, so the
# columns no longer line up with what the model was trained on (and the feature
# count almost certainly differs, making predict() fail outright).
# Also kept as a sparse matrix: LogisticRegression accepts sparse input, and the
# original .toarray() would materialise a huge dense matrix for nothing.
data = vectorizer.transform(df['text'])

# Reload the persisted model and predict on the validation features.
# NOTE(review): for truly standalone inference the fitted vectorizer should be
# persisted with joblib alongside the model; here we rely on the in-memory
# `vectorizer` fitted earlier in this script.
model1 = joblib.load(str(Path("D:/code/models") / "logist_regression_model1.pkl"))
print(model1.predict(data))