{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "# Use the SimHei font so Chinese labels render correctly in figures.\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "# With a Chinese font active, matplotlib's default unicode minus glyph breaks;\n",
    "# disable it so negative tick labels display as a plain ASCII '-'.\n",
    "plt.rcParams['axes.unicode_minus'] = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 232,
   "outputs": [
    {
     "data": {
      "text/plain": "   Unnamed: 0      UserID  EI  NS  TF  JP MBTI类型  粉丝数  关注数  性别  ...  贴  感到  \\\n0           0  1330417035   1   1   1   0   ISFJ   71  276   0  ...  0   0   \n\n   时刻  一份  吃  回复  个人  喜欢  快来  干净  \n0   0   0  0   0   0   0   1   0  \n\n[1 rows x 2002 columns]",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>Unnamed: 0</th>\n      <th>UserID</th>\n      <th>EI</th>\n      <th>NS</th>\n      <th>TF</th>\n      <th>JP</th>\n      <th>MBTI类型</th>\n      <th>粉丝数</th>\n      <th>关注数</th>\n      <th>性别</th>\n      <th>...</th>\n      <th>贴</th>\n      <th>感到</th>\n      <th>时刻</th>\n      <th>一份</th>\n      <th>吃</th>\n      <th>回复</th>\n      <th>个人</th>\n      <th>喜欢</th>\n      <th>快来</th>\n      <th>干净</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>0</td>\n      <td>1330417035</td>\n      <td>1</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>ISFJ</td>\n      <td>71</td>\n      <td>276</td>\n      <td>0</td>\n      <td>...</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>1</td>\n      <td>0</td>\n    </tr>\n  </tbody>\n</table>\n<p>1 rows × 2002 columns</p>\n</div>"
     },
     "execution_count": 232,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the preprocessed dataset: one row per Weibo user with MBTI labels,\n",
    "# profile statistics and per-word frequency features (2002 columns total).\n",
    "all_data = pd.read_csv('../data/mbti_weibo_data_cantrain1500.csv')\n",
    "all_data.head(1)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 233,
   "outputs": [
    {
     "data": {
      "text/plain": "       UserID  EI  NS  TF  JP MBTI类型\n0  1330417035   1   1   1   0   ISFJ",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>UserID</th>\n      <th>EI</th>\n      <th>NS</th>\n      <th>TF</th>\n      <th>JP</th>\n      <th>MBTI类型</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>1330417035</td>\n      <td>1</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>ISFJ</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 233,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Columns 1-6 are the targets: UserID, the four binary MBTI axes\n",
    "# (EI, NS, TF, JP) and the combined MBTI type string.\n",
    "target_data = all_data.iloc[:,1:7]\n",
    "target_data.head(1)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 234,
   "outputs": [
    {
     "data": {
      "text/plain": "   粉丝数  关注数  性别  微博数  注册年限  互动数  视频累积播放量  TOP1  TOP2  TOP3  ...  贴  感到  时刻  \\\n0   71  276   0  655    11  109        0    29    20    14  ...  0   0   0   \n\n   一份  吃  回复  个人  喜欢  快来  干净  \n0   0  0   0   0   0   1   0  \n\n[1 rows x 1995 columns]",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>粉丝数</th>\n      <th>关注数</th>\n      <th>性别</th>\n      <th>微博数</th>\n      <th>注册年限</th>\n      <th>互动数</th>\n      <th>视频累积播放量</th>\n      <th>TOP1</th>\n      <th>TOP2</th>\n      <th>TOP3</th>\n      <th>...</th>\n      <th>贴</th>\n      <th>感到</th>\n      <th>时刻</th>\n      <th>一份</th>\n      <th>吃</th>\n      <th>回复</th>\n      <th>个人</th>\n      <th>喜欢</th>\n      <th>快来</th>\n      <th>干净</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>71</td>\n      <td>276</td>\n      <td>0</td>\n      <td>655</td>\n      <td>11</td>\n      <td>109</td>\n      <td>0</td>\n      <td>29</td>\n      <td>20</td>\n      <td>14</td>\n      <td>...</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>0</td>\n      <td>1</td>\n      <td>0</td>\n    </tr>\n  </tbody>\n</table>\n<p>1 rows × 1995 columns</p>\n</div>"
     },
     "execution_count": 234,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Columns 7 onward are the model inputs: profile statistics (follower/\n",
    "# following counts, gender, ...) plus word-frequency features (1995 columns).\n",
    "features_data0 = all_data.iloc[:,7:2002]\n",
    "features_data0.head(1)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 235,
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "# BUGFIX: SequentialFeatureSelector was listed twice in this import tuple;\n",
    "# the duplicate has been removed.\n",
    "from sklearn.feature_selection import (SelectKBest, chi2, SelectPercentile, SelectFromModel, SequentialFeatureSelector, f_regression\n",
    ")\n",
    "\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "scaler = MinMaxScaler()\n",
    "\n",
    "# One binary target per MBTI axis.\n",
    "y_EI = target_data[\"EI\"]\n",
    "y_NS = target_data[\"NS\"]\n",
    "y_TF = target_data[\"TF\"]\n",
    "y_JP = target_data[\"JP\"]\n",
    "\n",
    "y_list = {\"y_EI\":y_EI,\"y_NS\":y_NS,\"y_TF\":y_TF,\"y_JP\":y_JP}\n",
    "X  = features_data0"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 236,
   "outputs": [],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "def trainModelTest(X_train, X_test, y_train, y_test):\n",
    "    \"\"\"Fit a logistic-regression baseline and evaluate it on the test split.\n",
    "\n",
    "    Returns a dict with the test accuracy, the (scaled) splits, the\n",
    "    train/test predictions and the fitted model.\n",
    "    \"\"\"\n",
    "    # NOTE(review): `scaler` is a notebook-global that a later cell rebinds to\n",
    "    # StandardScaler -- which scaler runs here depends on cell execution order.\n",
    "    X_train = scaler.fit_transform(X_train)  # fit scaling statistics on the training fold only\n",
    "    # BUGFIX: the test set must be transformed with the training-fold\n",
    "    # statistics; re-fitting on X_test leaked test-set information.\n",
    "    X_test = scaler.transform(X_test)\n",
    "\n",
    "    log = LogisticRegression()\n",
    "    log.fit(X_train, y_train)\n",
    "\n",
    "    y_pred0 = log.predict(X_train)  # training-set predictions (for diagnostics)\n",
    "    y_pred = log.predict(X_test)\n",
    "    score_l = log.score(X_test,y_test)\n",
    "    result ={'accuracy':score_l,'X_train':X_train, 'X_test':X_test, 'y_train':y_train, 'y_test':y_test,'y_pred':y_pred ,'y_pred0':y_pred0,'model':log}\n",
    "    return result"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 237,
   "outputs": [],
   "source": [
    "# Fully-connected neural-network baseline\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "%matplotlib inline\n",
    "\n",
    "# Standardize features: the network is sensitive to raw feature scale.\n",
    "# WARNING: this rebinds the notebook-global `scaler` (previously a\n",
    "# MinMaxScaler), so trainModelTest's behavior depends on cell order.\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "scaler = StandardScaler()\n",
    "\n",
    "def trainFCNN(X_train, X_test, y_train, y_test):\n",
    "    \"\"\"Train a small MLP on a binary target and evaluate on the test split.\n",
    "\n",
    "    Returns a dict shaped like trainModelTest's result: accuracy, the splits,\n",
    "    train/test class predictions and the fitted torch model.\n",
    "    \"\"\"\n",
    "    # Labels reshaped to (N, 1) so they match the network's output shape\n",
    "    # (avoids silent (N,) vs (N,1) broadcasting inside the loss).\n",
    "    labels = np.array(y_train, dtype=np.float32).reshape(-1, 1)\n",
    "    input_features = np.asarray(X_train, dtype=np.float32)\n",
    "\n",
    "    input_size = input_features.shape[1]\n",
    "    hidden_size3 = 128\n",
    "    hidden_size4 = 256\n",
    "    output_size = 1\n",
    "    batch_size = 1  # mini-batch training\n",
    "\n",
    "    my_nn = torch.nn.Sequential(\n",
    "        torch.nn.Linear(input_size,hidden_size3),\n",
    "        torch.nn.ReLU(),\n",
    "        torch.nn.Linear(hidden_size3,hidden_size4),\n",
    "        torch.nn.ReLU(),\n",
    "        torch.nn.Linear(hidden_size4,output_size)\n",
    "    )\n",
    "\n",
    "    # MSE on a single linear output, thresholded at 0.5 below.\n",
    "    # (BCEWithLogitsLoss would be the conventional choice for binary targets.)\n",
    "    cost = torch.nn.MSELoss(reduction='mean')\n",
    "    optimizer = torch.optim.Adam(my_nn.parameters(),lr=0.001)\n",
    "\n",
    "    losses = []\n",
    "    for epoch in range(10):\n",
    "        batch_loss = []\n",
    "        for start in range(0,len(input_features),batch_size):\n",
    "            end = min(start + batch_size, len(input_features))\n",
    "            # BUGFIX: inputs and labels must not carry requires_grad=True;\n",
    "            # only the model parameters are optimized.\n",
    "            xx = torch.tensor(input_features[start:end], dtype=torch.float)\n",
    "            yy = torch.tensor(labels[start:end], dtype=torch.float)\n",
    "            prediction = my_nn(xx)\n",
    "            loss = cost(prediction,yy)\n",
    "            optimizer.zero_grad()\n",
    "            # BUGFIX: retain_graph=True was unnecessary and leaked memory.\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            batch_loss.append(loss.data.numpy())\n",
    "\n",
    "        # Record the mean loss of every epoch. (The old `if i%10==0` guard\n",
    "        # only ever fired at epoch 0 because the loop runs 10 epochs.)\n",
    "        losses.append(np.mean(batch_loss))\n",
    "\n",
    "    # Training-set predictions, thresholded into class labels.\n",
    "    x0 = torch.tensor(np.asarray(X_train), dtype=torch.float)\n",
    "    y_pred0 = my_nn(x0).data.numpy()\n",
    "    y_pred0 = [1 if i > 0.5 else 0 for i in y_pred0]\n",
    "\n",
    "    # Test-set predictions.\n",
    "    x = torch.tensor(np.asarray(X_test), dtype=torch.float)\n",
    "    y_pred = my_nn(x).data.numpy()\n",
    "    y_pred = [1 if i > 0.5 else 0 for i in y_pred]\n",
    "\n",
    "    accuracy = accuracy_score(y_test, y_pred)\n",
    "    result ={'accuracy':accuracy,'X_train':X_train, 'X_test':X_test, 'y_train':y_train, 'y_test':y_test,'y_pred':y_pred ,'y_pred0':y_pred0,'model':my_nn}\n",
    "    return result"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 242,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "y_EI\n",
      "y_NS\n",
      "y_TF\n",
      "y_JP\n",
      "0 =====训练=== 0\n",
      "LOG_y_EI  :  0.7277777777777777\n",
      "FCNN_y_EI  :  0.6444444444444445\n",
      "LOG_y_NS  :  0.7305555555555555\n",
      "FCNN_y_NS  :  0.7\n",
      "LOG_y_TF  :  0.7138888888888889\n",
      "FCNN_y_TF  :  0.6388888888888888\n",
      "LOG_y_JP  :  0.6972222222222222\n",
      "FCNN_y_JP  :  0.6361111111111111\n",
      "=====结果===\n"
     ]
    }
   ],
   "source": [
    "# For each MBTI axis: sweep the number of chi2-selected features, keep the\n",
    "# best logistic-regression run, then train the MLP on the same splits.\n",
    "bestlog= []\n",
    "y_name = [\"y_EI\",\"y_NS\",\"y_TF\",'y_JP']\n",
    "allResult = {}\n",
    "\n",
    "for i in range(1):\n",
    "    for name in y_name:\n",
    "        print(name)\n",
    "        y = y_list[name]\n",
    "\n",
    "        bestlog_Score = 0\n",
    "        bestlog_new_features = \"\"\n",
    "        bestlog_Logresult = []\n",
    "\n",
    "        # The split depends only on `i`, so compute it once instead of\n",
    "        # recomputing the identical split ~1000 times inside the k-loop.\n",
    "        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=i)\n",
    "\n",
    "        for k in range(5,1000):\n",
    "            # BUGFIX: fit the feature selector on the training split only;\n",
    "            # fitting on the full X leaked test-set information into the\n",
    "            # feature choice.\n",
    "            selector = SelectKBest(score_func=chi2, k=k)\n",
    "            selector.fit(X_train, y_train)\n",
    "\n",
    "            mask = selector.get_support()   # boolean mask of kept features\n",
    "            new_features = X.columns[mask]  # their column names\n",
    "\n",
    "            Logresult = trainModelTest(X_train[new_features],X_test[new_features],y_train,y_test)\n",
    "            logScore = Logresult['accuracy']\n",
    "\n",
    "            if logScore >bestlog_Score:\n",
    "                bestlog_Score = logScore\n",
    "                bestlog_new_features=new_features\n",
    "                bestlog_Logresult=Logresult\n",
    "\n",
    "        # Save the best run per target, then train the neural network on the\n",
    "        # same (already scaled) splits for a like-for-like comparison.\n",
    "        allResult['LOG_'+name]=bestlog_Logresult\n",
    "        allResult['LOG_'+name+'_features']=bestlog_new_features\n",
    "        allResult['FCNN_'+name]=trainFCNN(bestlog_Logresult['X_train'],bestlog_Logresult['X_test'],bestlog_Logresult['y_train'],bestlog_Logresult['y_test'])\n",
    "\n",
    "    allResult2 = allResult\n",
    "    print(i,\"=====训练===\",i)\n",
    "    for record in allResult2:\n",
    "        # Result dicts have exactly 8 entries; feature Index objects do not.\n",
    "        if len(allResult2[record])==8:\n",
    "            print(record,\" : \",allResult2[record]['accuracy'])\n",
    "    print(\"=====结果===\")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 239,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LOG_y_EI  :  0.6708333333333333\n",
      "LOG_y_EI_features  :  54\n",
      "FCNN_y_EI  :  0.6833333333333333\n",
      "LOG_y_NS  :  0.625\n",
      "LOG_y_NS_features  :  152\n",
      "FCNN_y_NS  :  0.6291666666666667\n",
      "LOG_y_TF  :  0.6541666666666667\n",
      "LOG_y_TF_features  :  269\n",
      "FCNN_y_TF  :  0.65\n",
      "LOG_y_JP  :  0.7\n",
      "LOG_y_JP_features  :  590\n",
      "FCNN_y_JP  :  0.6833333333333333\n"
     ]
    }
   ],
   "source": [
    "# BUGFIX: `allResult1` was meant to be a snapshot of `allResult` from an\n",
    "# earlier run, but its assignment was commented out, so this cell raised a\n",
    "# NameError on a fresh run. Fall back to the live `allResult`.\n",
    "if 'allResult1' not in globals():\n",
    "    allResult1 = allResult\n",
    "for record in allResult1:\n",
    "    # Result dicts have exactly 8 entries; feature Index objects do not.\n",
    "    if len(allResult1[record])==8:\n",
    "        print(record,\" : \",allResult1[record]['accuracy'])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 240,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LOG_y_EI  :  0.7277777777777777\n",
      "FCNN_y_EI  :  0.6166666666666667\n",
      "LOG_y_NS  :  0.7305555555555555\n",
      "FCNN_y_NS  :  0.6833333333333333\n",
      "LOG_y_TF  :  0.7138888888888889\n",
      "FCNN_y_TF  :  0.625\n",
      "LOG_y_JP  :  0.6972222222222222\n",
      "FCNN_y_JP  :  0.6222222222222222\n"
     ]
    }
   ],
   "source": [
    "# Snapshot the latest results and print each model's test accuracy.\n",
    "# Entries whose length is exactly 8 are result dicts (8 keys); other\n",
    "# entries are selected-feature Index objects and are skipped.\n",
    "allResult2 = allResult\n",
    "for record in allResult2:\n",
    "    if len(allResult2[record])==8:\n",
    "        print(record,\" : \",allResult2[record]['accuracy'])\n",
    "    # else:\n",
    "    #     print(record,\" : \",len(allResult2[record]))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "outputs": [],
   "source": [
    "import lightgbm as lgb\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "def lgbTrain(X,y):\n",
    "    \"\"\"Train a LightGBM binary classifier and report its test accuracy.\n",
    "\n",
    "    Returns a dict with the accuracy and the train/test splits.\n",
    "    \"\"\"\n",
    "    # Split first so scaling statistics come from the training fold only.\n",
    "    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "    # BUGFIX: the scaler was previously fit on all of X before splitting,\n",
    "    # leaking test-set statistics into the features.\n",
    "    X_train = scaler.fit_transform(X_train)\n",
    "    X_test = scaler.transform(X_test)\n",
    "\n",
    "    d_train = lgb.Dataset(X_train, label=y_train)\n",
    "\n",
    "    params = {\n",
    "        'boosting_type': 'gbdt',\n",
    "        'objective': 'binary',\n",
    "        'metric': {'binary_logloss', 'auc'},\n",
    "        'num_leaves': 31,\n",
    "        'learning_rate': 0.01,\n",
    "        'feature_fraction': 0.9,\n",
    "        'bagging_fraction': 0.8,\n",
    "        'bagging_freq': 5,\n",
    "        'verbose': -1\n",
    "    }\n",
    "\n",
    "    # 200 boosting rounds.\n",
    "    gbm = lgb.train(params, d_train, 200)\n",
    "\n",
    "    # Predicted probabilities, thresholded into class labels.\n",
    "    y_pred = gbm.predict(X_test)\n",
    "    y_pred = [1 if i > 0.5 else 0 for i in y_pred]\n",
    "\n",
    "    accuracy = accuracy_score(y_test, y_pred)\n",
    "    result ={'accuracy':accuracy,'X_train':X_train, 'X_test':X_test, 'y_train':y_train, 'y_test':y_test }\n",
    "    return result"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "y_EI : 0.6708333333333333\n",
      "y_EI : 0.6416666666666667\n",
      "y_NS : 0.6208333333333333\n",
      "y_NS : 0.6041666666666666\n",
      "y_TF : 0.6708333333333333\n",
      "y_TF : 0.6833333333333333\n",
      "y_JP : 0.6958333333333333\n",
      "y_JP : 0.6916666666666667\n"
     ]
    }
   ],
   "source": [
    "# Re-train the MLP on each saved best-run split and compare accuracies.\n",
    "# NOTE(review): `bestlog` is only populated by code that is currently\n",
    "# commented out above; this loop is a no-op until that is re-enabled.\n",
    "for blog in bestlog:\n",
    "    logresult = blog['Logresult']\n",
    "\n",
    "    fcnn_result = trainFCNN(logresult['X_train'],logresult['X_test'],logresult['y_train'],logresult['y_test'])\n",
    "    print(blog['name'],\":\",logresult['accuracy'])\n",
    "    # BUGFIX: print the FCNN accuracy, not the whole result dict (which\n",
    "    # contains full arrays and the model object).\n",
    "    print(blog['name'],\":\",fcnn_result['accuracy'])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "outputs": [],
   "source": [
    "from sklearn.model_selection import GridSearchCV\n",
    "from lightgbm import LGBMClassifier\n",
    "\n",
    "def bestLGBM(X_train,y_train,X_test,y_test):\n",
    "    \"\"\"Grid-search LightGBM hyper-parameters with 5-fold CV and return the\n",
    "    test-set accuracy of the best estimator.\"\"\"\n",
    "    search_space = {\n",
    "        'n_estimators': [200, 500, 800],\n",
    "        'learning_rate': [0.01, 0.05, 0.1],\n",
    "        'max_depth': [3, 5, 7],\n",
    "        'num_leaves': [31, 50, 100]\n",
    "    }\n",
    "    search = GridSearchCV(LGBMClassifier(random_state=42), search_space, cv=5, scoring='accuracy')\n",
    "    search.fit(X_train, y_train)\n",
    "    print('最佳参数组合：', search.best_params_)\n",
    "    print('最佳得分：', search.best_score_)\n",
    "    predictions = search.best_estimator_.predict(X_test)\n",
    "    return accuracy_score(y_test, predictions)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# BUGFIX: `bestLGBresult` is only populated by code that is currently\n",
    "# commented out above, so this cell raised a NameError on a fresh run;\n",
    "# guard the lookup so Restart & Run All still completes.\n",
    "# Last observed tuning result:\n",
    "#   最佳参数组合： {'learning_rate': 0.1, 'max_depth': 7, 'n_estimators': 200, 'num_leaves': 31}\n",
    "#   最佳得分： 0.668717277486911\n",
    "#   y_EI : 0.6541666666666667\n",
    "for bLGB in globals().get('bestLGBresult', []):\n",
    "    # BUGFIX: use the current loop item rather than always element 0.\n",
    "    lgbresult = bLGB['LGBresult']\n",
    "    # accuracy = bestLGBM(lgbresult['X_train'],lgbresult['y_train'],lgbresult['X_test'],lgbresult['y_test'])\n",
    "    # print(bLGB['name'],\":\",accuracy)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": true
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}