{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "import lightgbm as lgb\n",
    "import numpy as np\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import accuracy_score\n",
    "import time\n",
    "from concurrent.futures import ThreadPoolExecutor\n",
    "from sklearn.ensemble import VotingClassifier\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import BaggingClassifier\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.model_selection import RandomizedSearchCV\n",
    "from sklearn.ensemble import BaggingClassifier,RandomForestClassifier,ExtraTreesClassifier,GradientBoostingClassifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load pre-engineered feature tables produced by an upstream transform step.\n",
    "# NOTE(review): paths are relative -- assumes the notebook runs from the project root.\n",
    "train_all_data = pd.read_hdf('data/train_transform.h5')\n",
    "test_all_data = pd.read_hdf('data/test_transform.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Features = every column except the label; target = the 'type' column.\n",
    "X_train = train_all_data.drop('type', axis=1)\n",
    "y_train = train_all_data[\"type\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold-out split intentionally disabled: model selection is done by 3-fold CV\n",
    "# inside RandomizedSearchCV below, so all labelled data is used for training.\n",
    "#X_train, X_test, y_train, y_test = train_test_split(X_data, Y_data, test_size=0.3, random_state=42)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练 (Training)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train():\n",
    "    \"\"\"Tune the voting ensemble's hyper-parameters with randomized search.\n",
    "\n",
    "    Reads the module-level X_train / y_train.  Returns the fitted\n",
    "    RandomizedSearchCV (exposes best_params_ / best_score_ / best_estimator_).\n",
    "    \"\"\"\n",
    "    # Search space for the MLP member; keys use the '<estimator>__<param>' form\n",
    "    # required when tuning through a VotingClassifier.\n",
    "    param_distribution = {\n",
    "        \"mlp__solver\": [\"sgd\", \"lbfgs\", \"adam\"],\n",
    "        \"mlp__activation\": [\"identity\", \"logistic\", \"tanh\", \"relu\"],\n",
    "        \"mlp__learning_rate\": [\"constant\", \"invscaling\", \"adaptive\"],\n",
    "        \"mlp__hidden_layer_sizes\": [(50, 50), (100, 100), (200, 200), (300, 300)],\n",
    "    }\n",
    "    # NOTE(review): only the MLP is currently enabled, so the hard vote is\n",
    "    # trivial; re-enable additional estimators (lgb / svm / tree ensembles)\n",
    "    # for the voting to be meaningful.\n",
    "    voting_clf = VotingClassifier(estimators=[\n",
    "        (\"mlp\", MLPClassifier(solver='sgd', activation='relu', learning_rate=\"constant\",\n",
    "                              hidden_layer_sizes=(100, 100))),\n",
    "    ], voting=\"hard\")\n",
    "    # Randomized hyper-parameter search over the space above.\n",
    "    random_search_cv = RandomizedSearchCV(voting_clf,\n",
    "                                      param_distribution,\n",
    "                                      n_iter = 100,     # candidates sampled; more = finer search, longer runtime\n",
    "                                      cv = 3,           # 3-fold cross-validation per candidate\n",
    "                                      n_jobs = -1,      # use every CPU core (default is 1)\n",
    "                                      random_state = 42 # seed the sampling so the search is reproducible\n",
    "                                         ) \n",
    "    random_search_cv.fit(X_train,y_train)\n",
    "    return random_search_cv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/carl-hui/.virtualenvs/AI/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: time.clock has been deprecated in Python 3.3 and will be removed from Python 3.8: use time.perf_counter or time.process_time instead\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "84.935406\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/carl-hui/.virtualenvs/AI/lib/python3.7/site-packages/sklearn/neural_network/_multilayer_perceptron.py:470: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n",
      "/home/carl-hui/.virtualenvs/AI/lib/python3.7/site-packages/ipykernel_launcher.py:8: DeprecationWarning: time.clock has been deprecated in Python 3.3 and will be removed from Python 3.8: use time.perf_counter or time.process_time instead\n",
      "  \n"
     ]
    }
   ],
   "source": [
    "# time.clock() was deprecated in Python 3.3 and removed in 3.8 (the warning\n",
    "# below came from it); time.perf_counter() is the documented replacement for\n",
    "# measuring elapsed wall-clock intervals.\n",
    "start = time.perf_counter()\n",
    "\n",
    "model = train()\n",
    "\n",
    "end = time.perf_counter()\n",
    "print(str(end-start))  # previous full run: ~263.7 s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'mlp__solver': 'lbfgs', 'mlp__learning_rate': 'constant', 'mlp__hidden_layer_sizes': (100, 100), 'mlp__activation': 'tanh'}\n",
      "0.6872833345147974\n",
      "VotingClassifier(estimators=[('mlp',\n",
      "                              MLPClassifier(activation='tanh', alpha=0.0001,\n",
      "                                            batch_size='auto', beta_1=0.9,\n",
      "                                            beta_2=0.999, early_stopping=False,\n",
      "                                            epsilon=1e-08,\n",
      "                                            hidden_layer_sizes=(100, 100),\n",
      "                                            learning_rate='constant',\n",
      "                                            learning_rate_init=0.001,\n",
      "                                            max_fun=15000, max_iter=200,\n",
      "                                            momentum=0.9, n_iter_no_change=10,\n",
      "                                            nesterovs_momentum=True,\n",
      "                                            power_t=0.5, random_state=None,\n",
      "                                            shuffle=True, solver='lbfgs',\n",
      "                                            tol=0.0001, validation_fraction=0.1,\n",
      "                                            verbose=False, warm_start=False))],\n",
      "                 flatten_transform=True, n_jobs=None, voting='hard',\n",
      "                 weights=None)\n"
     ]
    }
   ],
   "source": [
    "# Search summary: winning hyper-parameters, mean CV score, and the refit model.\n",
    "for attr in (\"best_params_\", \"best_score_\", \"best_estimator_\"):\n",
    "    print(getattr(model, attr))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试 (Evaluation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predictions on the training data itself -- an optimistic sanity check,\n",
    "# not a generalization estimate (the CV score above is the honest one).\n",
    "plabels = model.predict(X_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "oof f1 0.45984268938518397\n",
      "train accuracy 0.6702857142857143\n"
     ]
    }
   ],
   "source": [
    "# Training-set metrics; macro-F1 weighs the three classes equally.\n",
    "macro_f1 = metrics.f1_score(y_train, plabels, average='macro')\n",
    "train_acc = accuracy_score(y_train, plabels)\n",
    "print('oof f1', macro_f1)\n",
    "print('train accuracy', train_acc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 生产 (Production / submission)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# .copy() detaches the submission frame from test_all_data, so adding a\n",
    "# column later cannot write through a view (this is what caused the\n",
    "# SettingWithCopyWarning further down).\n",
    "sub = test_all_data[['ship']].copy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict the class for the test set; drop the id column 'ship' so the\n",
    "# feature columns match what the model was trained on.\n",
    "production_pred = model.predict(test_all_data.drop('ship', axis=1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2    0.8505\n",
      "0    0.1325\n",
      "1    0.0170\n",
      "Name: pred, dtype: float64\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/carl-hui/.virtualenvs/AI/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n",
      "/home/carl-hui/.virtualenvs/AI/lib/python3.7/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  after removing the cwd from sys.path.\n"
     ]
    }
   ],
   "source": [
    "# assign() returns a new frame, so nothing is written through a view of\n",
    "# test_all_data -- this removes the SettingWithCopyWarning produced by the\n",
    "# previous chained assignment.\n",
    "sub = sub.assign(pred=production_pred)\n",
    "\n",
    "# Class balance of the predictions (normalized value counts).\n",
    "print(sub['pred'].value_counts(1))\n",
    "# Map numeric labels back to the competition's category names.\n",
    "sub = sub.assign(pred=sub['pred'].map({0:'围网',1:'刺网',2:'拖网'}))\n",
    "sub.to_csv('data/result.csv', index=None, header=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
