{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "warming-elizabeth",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "import src.configs as config\n",
    "from sklearn.model_selection import train_test_split, KFold, StratifiedKFold\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "import logging\n",
    "\n",
    "### K-fold modeling\n",
    "def train_tfidf_model(X, model_name):\n",
    "    \"\"\"Fit a TF-IDF vectorizer on corpus X and persist it under model_name.\"\"\"\n",
    "    # token ids excluded from the vocabulary (presumably punctuation codes in this corpus -- TODO confirm)\n",
    "    stop_tokens = ['3750', '648', '900', '3370', '6122']\n",
    "    vectorizer = TfidfVectorizer(stop_words=stop_tokens, ngram_range=(1, 3), max_features=20000)\n",
    "    vectorizer.fit(X)\n",
    "    config.DOP.save(vectorizer, model_name, is_model=True)\n",
    "    return vectorizer\n",
    "\n",
    "\n",
    "def get_tfidf_features(X, model_name, vec_name):\n",
    "    \"\"\"Load the saved TF-IDF model, vectorize X, persist and return the matrix.\"\"\"\n",
    "    vectorizer = config.DOP.load_model(model_name)\n",
    "    features = vectorizer.transform(X)\n",
    "    config.DOP.save(features, vec_name, is_model=False)\n",
    "    return features\n",
    "\n",
    "\n",
    "    \n",
    "def train_lgb(X_train, X_test, y_train, y_test, model_name=\"lgb\", test_size=0.2):\n",
    "    \"\"\"Train a LightGBM classifier with early stopping, save it via\n",
    "    config.DOP, and print classification reports for both splits.\n",
    "\n",
    "    NOTE(review): test_size is accepted but never used inside this function.\n",
    "    \"\"\"\n",
    "    LGB_PARAMS = {\n",
    "        \"boosting_type\": 'gbdt', \n",
    "        \"num_leaves\": 67, # number of leaves per tree\n",
    "        \"reg_alpha\": 0.0, \n",
    "        \"reg_lambda\": 0.5, # tuned L2 regularization term\n",
    "        \"max_depth\": 5, \n",
    "        \"n_estimators\": 500, \n",
    "        \"subsample\": 0.7, \n",
    "        \"colsample_bytree\": 0.7, \n",
    "        \"subsample_freq\": 1,\n",
    "        \"learning_rate\": 0.1, \n",
    "        \"min_child_weight\": 50, \n",
    "        \"random_state\": 2018, \n",
    "        'feature_fraction': 0.9,  # fraction of features sampled when building each tree\n",
    "        'bagging_fraction': 0.8,  # fraction of samples used when building each tree\n",
    "        \"n_jobs\": 10,\n",
    "        # NOTE(review): num_iterations is a LightGBM alias of n_estimators;\n",
    "        # specifying both is redundant and the alias takes precedence.\n",
    "        \"num_iterations\":500, # extended training rounds from 100 to 500\n",
    "    }\n",
    "    lgb_model = LGBMClassifier(**LGB_PARAMS) \n",
    "    # verbose controls per-round evaluation logging\n",
    "    lgb_model.fit(X_train, y_train, eval_set=(X_test, y_test), early_stopping_rounds=10, verbose=True)\n",
    "    config.DOP.save(lgb_model, model_name, is_model=True)\n",
    "    labels = lgb_model.predict(X_train)\n",
    "    print(\"训练集：\")\n",
    "    print(classification_report(y_train, labels))\n",
    "    print(\"测试集：\")\n",
    "    test_labels = lgb_model.predict(X_test)\n",
    "    print(classification_report(y_test, test_labels))\n",
    "\n",
    "def load_kf():\n",
    "    \"\"\"Yield (train_index, test_index) integer index arrays for each saved fold.\n",
    "\n",
    "    Reads the persisted fold table (columns: epoch, index, is_train) and\n",
    "    yields one pair of index arrays per distinct epoch value.\n",
    "    \"\"\"\n",
    "    KF_saver = config.DOP.load_data(\"KF_saver\")\n",
    "    epochs = KF_saver['epoch'].unique()\n",
    "    # fixed: removed unused `trains` / `tests` accumulator lists (dead code)\n",
    "    for epoch in epochs:\n",
    "        tmp = KF_saver[KF_saver['epoch'] == epoch]\n",
    "        train_index = tmp['index'][tmp['is_train'] == 1].values\n",
    "        test_index = tmp['index'][tmp['is_train'] == 0].values\n",
    "        yield train_index, test_index\n",
    "\n",
    "        \n",
    "def pred_kf(n_folds=6, num_class=14):\n",
    "    \"\"\"Ensemble the per-fold LightGBM models on the test TF-IDF matrix and\n",
    "    write the argmax labels to a submission CSV.\n",
    "\n",
    "    Args:\n",
    "        n_folds: number of fold models to average (default 6, matching the saved folds).\n",
    "        num_class: number of target classes (default 14).\n",
    "    \"\"\"\n",
    "    model_name = \"lgb{KF_index}_20000\"\n",
    "    test_vec_name = \"my_test_vec_20000\"\n",
    "    X_test = config.DOP.load_data(test_vec_name)\n",
    "    test_size = X_test.shape[0]\n",
    "    # sum of predicted class probabilities across fold models\n",
    "    predictions = np.zeros((test_size, num_class))\n",
    "    for i in range(n_folds):\n",
    "        lgb_model = config.DOP.load_model(model_name.format(KF_index=i))\n",
    "        predictions += lgb_model.predict_proba(X_test)\n",
    "    pred_labels = np.argmax(predictions, axis=1)  # fixed typo: perd_labels -> pred_labels\n",
    "    final = pd.DataFrame()\n",
    "    final['label'] = pred_labels\n",
    "    final.to_csv(config.PROCESSED_DATA_PATH + \"/submit_kf5_dim20000.csv\", index=None)\n",
    "        \n",
    "\n",
    "# data = pd.read_csv(config.PROCESSED_DATA_PATH + \"/cleaned_text.csv\")\n",
    "# test_data = pd.read_csv(config.DATA_PATH + \"/test_a.csv\")\n",
    "# train_vec_name = \"my_train_vec_20000\"\n",
    "# test_vec_name = \"my_test_vec_20000\"\n",
    "# X_train = config.DOP.load_data(train_vec_name)\n",
    "# X_test = config.DOP.load_data(test_vec_name)\n",
    "# y_train = data['label'].values\n",
    "\n",
    "# for KF_index, (train_index,valid_index) in enumerate(load_kf()):\n",
    "#     print(f\"---第{KF_index}次KF---\")\n",
    "#     logging.info(\"The No. {} cross validation begines...\".format(KF_index+1))\n",
    "\n",
    "#     # divide train and valid dataset\n",
    "#     x_train_, x_valid_ = X_train[train_index], X_train[valid_index]\n",
    "#     y_train_, y_valid_ = y_train[train_index], y_train[valid_index]\n",
    "#     train_lgb(x_train_, x_valid_,y_train_, y_valid_, model_name=f\"lgb{KF_index}_20000\", test_size=0.1)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "minimal-wheel",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "reverse-philosophy",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据my_test_vec_20000已加载成功\n",
      "模型lgb0_20000已加载成功\n",
      "模型lgb1_20000已加载成功\n",
      "模型lgb2_20000已加载成功\n",
      "模型lgb3_20000已加载成功\n",
      "模型lgb4_20000已加载成功\n",
      "模型lgb5_20000已加载成功\n"
     ]
    }
   ],
   "source": [
    "pred_kf()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
