{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "searching-creator",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "import src.configs as config\n",
    "from sklearn.model_selection import train_test_split, KFold, StratifiedKFold\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "def train_tfidf_model(X, model_name, max_features=20000):\n",
    "    \"\"\"Fit a TF-IDF vectorizer on X and persist it via config.DOP.\n",
    "\n",
    "    Args:\n",
    "        X: iterable of documents (space-separated token-id strings).\n",
    "        model_name: name under which the fitted vectorizer is saved.\n",
    "        max_features: vocabulary size cap (default 20000; the old inline\n",
    "            comment claimed 30000, which did not match the code).\n",
    "\n",
    "    Returns:\n",
    "        The fitted TfidfVectorizer.\n",
    "    \"\"\"\n",
    "    # Token ids treated as stopwords -- presumably very frequent,\n",
    "    # punctuation-like tokens in this anonymized corpus (confirm via EDA).\n",
    "    stopwords = ['3750', '648', '900', '3370', '6122']\n",
    "    tfidf_model = TfidfVectorizer(stop_words=stopwords, ngram_range=(1, 3),\n",
    "                                  max_features=max_features)\n",
    "    tfidf_model.fit(X)\n",
    "    config.DOP.save(tfidf_model, model_name, is_model=True)\n",
    "    return tfidf_model\n",
    "\n",
    "\n",
    "def get_tfidf_features(X, model_name, vec_name):\n",
    "    \"\"\"Transform documents with a previously saved TF-IDF vectorizer.\n",
    "\n",
    "    Loads the vectorizer stored under ``model_name``, transforms ``X``,\n",
    "    persists the resulting matrix under ``vec_name`` and returns it.\n",
    "    \"\"\"\n",
    "    vectorizer = config.DOP.load_model(model_name)\n",
    "    features = vectorizer.transform(X)\n",
    "    config.DOP.save(features, vec_name, is_model=False)\n",
    "    return features\n",
    "\n",
    "\n",
    "def train_lgb(X_train, y_train, model_name=\"lgb\", test_size=0.2, random_state=2018):\n",
    "    \"\"\"Train an LGBMClassifier on a random train/validation split.\n",
    "\n",
    "    The fitted model is persisted via config.DOP, and classification\n",
    "    reports are printed for both parts of the split.\n",
    "\n",
    "    Args:\n",
    "        X_train: feature matrix (e.g. sparse TF-IDF features).\n",
    "        y_train: class labels aligned with X_train.\n",
    "        model_name: name under which the fitted model is saved.\n",
    "        test_size: fraction held out for early stopping / evaluation.\n",
    "        random_state: seed for the split so runs are reproducible\n",
    "            (previously unseeded, so every run used a different split).\n",
    "    \"\"\"\n",
    "    X_train_sm, X_test_sm, y_train_sm, y_test_sm = train_test_split(\n",
    "        X_train, y_train, test_size=test_size, random_state=random_state)\n",
    "    LGB_PARAMS = {\n",
    "        \"boosting_type\": 'gbdt',\n",
    "        \"num_leaves\": 67,          # leaves per tree\n",
    "        \"reg_alpha\": 0.0,\n",
    "        \"reg_lambda\": 0.5,         # L2 regularization\n",
    "        \"max_depth\": 10,           # raised from 6 to 10\n",
    "        \"n_estimators\": 500,\n",
    "        \"subsample\": 0.7,\n",
    "        \"colsample_bytree\": 0.7,\n",
    "        \"subsample_freq\": 1,\n",
    "        \"learning_rate\": 0.1,\n",
    "        \"min_child_weight\": 50,\n",
    "        \"random_state\": 2018,\n",
    "        'feature_fraction': 0.9,   # feature sampling ratio when building trees\n",
    "        'bagging_fraction': 0.8,   # row sampling ratio when building trees\n",
    "        \"n_jobs\": 12,\n",
    "        \"num_iterations\": 500,     # boosting rounds raised from 100 to 500\n",
    "    }\n",
    "    lgb_model = LGBMClassifier(**LGB_PARAMS)\n",
    "    # eval_set is documented as a list of (X, y) pairs; early stopping\n",
    "    # monitors it and verbose=True prints per-round eval metrics.\n",
    "    lgb_model.fit(X_train_sm, y_train_sm, eval_set=[(X_test_sm, y_test_sm)],\n",
    "                  early_stopping_rounds=20, verbose=True)\n",
    "    config.DOP.save(lgb_model, model_name, is_model=True)\n",
    "    labels = lgb_model.predict(X_train_sm)\n",
    "    print(\"训练集：\")\n",
    "    print(classification_report(y_train_sm, labels))\n",
    "    print(\"测试集：\")\n",
    "    test_labels = lgb_model.predict(X_test_sm)\n",
    "    print(classification_report(y_test_sm, test_labels))\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"End-to-end pipeline: fit TF-IDF, build features, train LGBM, write submission.\"\"\"\n",
    "    data = pd.read_csv(config.PROCESSED_DATA_PATH + \"/cleaned_data.csv\")\n",
    "    test_data = pd.read_csv(config.DATA_PATH + \"/test_a.csv\")\n",
    "    # Fit the vectorizer on train + test text so the vocabulary covers both.\n",
    "    tfidf_train_data = np.concatenate([data['text'].values, test_data['text'].values])\n",
    "    print(len(tfidf_train_data))\n",
    "\n",
    "    # NOTE(review): the *_30000 names are stale -- the vectorizer caps the\n",
    "    # vocabulary at 20000 features. Kept unchanged so cached artifacts load.\n",
    "    model_name = \"full_tfidf_dim_30000\"\n",
    "    train_tfidf_model(tfidf_train_data, model_name)\n",
    "\n",
    "    train_vec_name = \"my_train_vec_30000\"\n",
    "    test_vec_name = \"my_test_vec_30000\"\n",
    "    # Transform and cache the feature matrices. This step used to be\n",
    "    # commented out in favour of loading previously cached vectors, which\n",
    "    # made a fresh run fail whenever the cache was missing.\n",
    "    X_train = get_tfidf_features(data['text'].values, model_name, train_vec_name)\n",
    "    X_test = get_tfidf_features(test_data['text'].values, model_name, test_vec_name)\n",
    "\n",
    "    y_train = data['label'].values\n",
    "    train_lgb(X_train, y_train, model_name=\"lgb_20000_final\", test_size=0.1)\n",
    "\n",
    "    lgb_model = config.DOP.load_model(\"lgb_20000_final\")\n",
    "    test_labels = lgb_model.predict(X_test)\n",
    "\n",
    "    df_label = pd.DataFrame(test_labels, columns=['label'])\n",
    "    df_label.to_csv(config.PROCESSED_DATA_PATH + \"/submit_lgb_20000_final.csv\", index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "documentary-apollo",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fake_label(model_name=\"lgb_20000\", prob_threshold=0.9):\n",
    "    \"\"\"Pseudo-label the test set and merge confident rows into the train csv.\n",
    "\n",
    "    NOTE(review): depends on notebook globals ``X_test``, ``test_data``\n",
    "    and ``data`` being defined -- hidden kernel state (they are locals of\n",
    "    main() as written), so this fails on a fresh Restart & Run All.\n",
    "    WARNING: overwrites cleaned_data.csv in place.\n",
    "\n",
    "    Args:\n",
    "        model_name: name of the persisted LGBM model to load.\n",
    "        prob_threshold: minimum predicted-class probability for a test\n",
    "            row to be kept as a pseudo-label.\n",
    "    \"\"\"\n",
    "    import re\n",
    "    lgb_model = config.DOP.load_model(model_name)\n",
    "    test_labels = lgb_model.predict(X_test)\n",
    "    test_proba = lgb_model.predict_proba(X_test)\n",
    "\n",
    "    df_label = pd.DataFrame(test_labels, columns=['label'])\n",
    "    df_label['prob'] = np.max(test_proba, axis=1)\n",
    "    # Strip selected high-frequency token ids from the text (presumably\n",
    "    # noise tokens; only partially overlaps the TF-IDF stopword list).\n",
    "    df_label['text'] = test_data['text'].apply(lambda x: re.sub(\"900|2662|885\", \"\", x))\n",
    "    df_label.to_csv(config.PROCESSED_DATA_PATH + \"/submit_lgb_20000_proba.csv\", index=None)\n",
    "    df_label = df_label[df_label['prob'] >= prob_threshold].copy()\n",
    "    print(df_label.shape)\n",
    "\n",
    "    values = np.concatenate([data[['label', 'cleaned_text']].values,\n",
    "                             df_label[['label', 'text']].values])\n",
    "    # Bug fix: the label column was misspelled 'lable'; main() reads\n",
    "    # cleaned_data.csv back and expects a 'label' column, so the typo\n",
    "    # broke the downstream pipeline.\n",
    "    final = pd.DataFrame(values, columns=['label', 'text'])\n",
    "    print(final.shape)\n",
    "    final.to_csv(config.PROCESSED_DATA_PATH + \"/cleaned_data.csv\", index=None)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
