{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 首先 import 必要的模块\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "from sklearn import metrics\n",
    "from matplotlib import pyplot\n",
    "import seaborn as sns\n",
    "%matplotlib inline\n",
    "from sklearn.metrics import recall_score\n",
    "from pandas import DataFrame\n",
    "from sklearn.metrics import log_loss\n",
    "import tensorflow as tf\n",
    "from sklearn.preprocessing import OneHotEncoder\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load positive/negative labeled samples and the unlabeled test set.\n",
    "# index_col=0: the first CSV column is the sample id, used as the index.\n",
    "allPdata=pd.read_csv(\"PositiveData.csv\",index_col=0)\n",
    "allNdata=pd.read_csv(\"NegativeData.csv\",index_col=0)\n",
    "allTdata=pd.read_csv(\"test.csv\",index_col=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Binary target: 1 = positive sample, 0 = negative sample.\n",
    "# NOTE(review): 'lable' is a misspelling of 'label', but it is used\n",
    "# consistently by later cells, so the key is kept unchanged here.\n",
    "allPdata['lable']=1\n",
    "allNdata['lable']=0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [],
   "source": [
    "# key1: continuous features to standardize with StandardScaler.\n",
    "# FIX: removed a duplicated 'F_ai2_fft_frequency_base2_max' entry; the\n",
    "# duplicate caused the same column to be selected (and standardized) twice\n",
    "# downstream, so the numeric frame width drops by one after this fix.\n",
    "key1=['B_ai1_fft_frequency_base200_max','B_ai1_fft_frequency_var','B_ai1_fft_y_base200_max','B_ai1_fft_y_base2_max', 'B_ai1_fft_y_base_max', 'B_ai1_time_boxing', 'B_ai1_time_fengzhi', 'B_ai1_time_maichong','B_ai1_time_std','B_ai1_time_yudu','B_ai2_fft_frequency_var','B_ai2_fft_y_base_max','B_ai2_fft_y_max','B_ai2_time_boxing','B_ai2_time_kurt','B_ai2_time_mean','B_ai2_time_rms', 'B_ai2_time_std', 'F_ai1_fft_frequency_base_max','F_ai1_fft_frequency_mean', 'F_ai1_fft_frequency_var','F_ai1_fft_y_base200_max', 'F_ai1_fft_y_base2_max','F_ai1_fft_y_base3_max','F_ai1_time_boxing','F_ai1_time_fengzhi','F_ai1_time_kurt','F_ai1_time_maichong', 'F_ai1_time_std','F_ai1_time_yudu',  'F_ai2_fft_frequency_base2_max','F_ai2_fft_frequency_mean','F_ai2_fft_frequency_var','F_ai2_fft_y_base200_max', 'F_ai2_fft_y_max','F_ai2_time_boxing', 'F_ai2_time_fengzhi','F_ai2_time_kurt','F_ai2_time_maichong','F_ai2_time_mean','F_ai2_time_rms', 'F_ai2_time_std', 'F_ai2_time_yudu']\n",
    "# key2: 0/1-style ratio flags, passed through without scaling.\n",
    "key2=['F_ai1_fft_frequency_low_other','F_ai1_ratio_base', 'F_ai1_ratio_base2','F_ai1_ratio_base3','F_ai2_fft_frequency_low_other','F_ai2_ratio_base', 'F_ai2_ratio_base2','F_ai2_ratio_base3']\n",
    "# key3: ratio features to one-hot encode.\n",
    "key3=['ratio_all','ratio_half']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack positive and negative samples into one training frame,\n",
    "# then split off the target column.\n",
    "train=pd.concat([allPdata,allNdata],axis=0)\n",
    "y=train['lable']\n",
    "train=train.drop('lable',axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(530, 114)"
      ]
     },
     "execution_count": 106,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: training frame shape (rows, feature columns).\n",
    "train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ratiolist=['F_ai1_ratio_base','F_ai1_ratio_base2','F_ai1_ratio_base3','F_ai2_ratio_base','F_ai2_ratio_base2','F_ai2_ratio_base3','B_ai1_ratio_base','B_ai1_ratio_base2','B_ai1_ratio_base3','B_ai2_ratio_base','B_ai1_ratio_base2','B_ai2_ratio_base3']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [],
   "source": [
    "# key2 features need no transformation; keep them as-is for train and test.\n",
    "categoryTrain=train.loc[:,key2]\n",
    "categoryTest=allTdata.loc[:,key2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sklearn.preprocessing  as preprocessing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ratio features destined for one-hot encoding (original note: this\n",
    "# normalization step is optional).\n",
    "# Concatenate train+test so the encoder later sees every category value.\n",
    "ratioTrain=train.loc[:,key3]\n",
    "ratioTest=allTdata.loc[:,key3]\n",
    "ratioAll=pd.concat([ratioTrain,ratioTest],axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\sklearn\\preprocessing\\_encoders.py:363: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\n",
      "If you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\n",
      "In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n",
      "  warnings.warn(msg, FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "# One-hot encode the ratio features. The encoder is fitted on the combined\n",
    "# train+test frame so both splits share the same category space.\n",
    "# FIX: indexTrain/indexTest were only defined in a LATER cell, so this cell\n",
    "# raised NameError under Restart & Run All; derive them here before use.\n",
    "indexTrain=train.index\n",
    "indexTest=allTdata.index\n",
    "ratioOneHot = OneHotEncoder()\n",
    "ratioOneHot.fit(ratioAll)  # fit only; the fit_transform result was unused\n",
    "onehotTrain=ratioOneHot.transform(ratioTrain)\n",
    "onehotTest=ratioOneHot.transform(ratioTest)\n",
    "oneHotTrainDf=pd.DataFrame(onehotTrain.toarray(),index=indexTrain)\n",
    "oneHotTestDf=pd.DataFrame(onehotTest.toarray(),index=indexTest)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Row indices (sample ids) of the train/test frames, used to re-attach\n",
    "# indices to the numpy arrays produced by the scaler and encoder.\n",
    "indexTrain=train.index\n",
    "indexTest=allTdata.index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select the continuous (key1) features for train and test, and stack them\n",
    "# so the scaler can later be fitted on the combined distribution.\n",
    "numericalTrain=train.loc[:,key1]\n",
    "# numericalTrain=numericalTrain.drop(['ratio_all','ratio_half'],axis=1)\n",
    "\n",
    "numericalTest=allTdata.loc[:,key1]\n",
    "# numericalTest=numericalTest.drop(['ratio_all','ratio_half'],axis=1)\n",
    "\n",
    "\n",
    "allnumerical=pd.concat([numericalTrain,numericalTest],axis=0)\n",
    "\n",
    "# numericalTrain=train.drop(ratiolist,axis=1)\n",
    "# numericalTrain=numericalTrain.drop(['ratio_all','ratio_half'],axis=1)\n",
    "\n",
    "# numericalTest=allTdata.drop(ratiolist,axis=1)\n",
    "# numericalTest=numericalTest.drop(['ratio_all','ratio_half'],axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1030, 44)"
      ]
     },
     "execution_count": 114,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check on the combined numeric frame: rows = train + test samples,\n",
    "# columns = selected key1 features.\n",
    "# numericalTest.info()\n",
    "# numericalTest\n",
    "allnumerical.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\sklearn\\preprocessing\\data.py:617: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.\n",
      "  return self.partial_fit(X, y)\n",
      "C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\sklearn\\base.py:462: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.\n",
      "  return self.fit(X, **fit_params).transform(X)\n"
     ]
    }
   ],
   "source": [
    "# Standardize the numeric features.\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Initialize the feature scaler.\n",
    "ss_X = StandardScaler()\n",
    "\n",
    "# Fit-transform on the combined train+test numeric frame.\n",
    "# NOTE(review): fitting on train+test leaks test statistics into the\n",
    "# preprocessing; fitting on train only would be the safer convention.\n",
    "ss_allnumerical = ss_X.fit_transform(allnumerical)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    }
   ],
   "source": [
    "# Apply the fitted scaler to the training features and restore the index.\n",
    "ssTrain=ss_X.transform(numericalTrain)\n",
    "ssTrain=pd.DataFrame(ssTrain,index=indexTrain)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    }
   ],
   "source": [
    "# Apply the same fitted scaler to the test features and restore the index.\n",
    "sstest=ss_X.transform(numericalTest)\n",
    "sstest=pd.DataFrame(sstest,index=indexTest)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assemble the final design matrices: scaled numerics + pass-through key2\n",
    "# features + one-hot encoded ratio features, aligned on the sample index.\n",
    "# x_train=pd.concat([x,ratioTrain,oneHotTrainDf],axis=1)\n",
    "# x_test=pd.concat([sstest,ratioTest,oneHotTestDf],axis=1)\n",
    "x_train=pd.concat([ssTrain,categoryTrain,oneHotTrainDf],axis=1)\n",
    "x_test=pd.concat([sstest,categoryTest,oneHotTestDf],axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 使用imlbearn库中上采样方法中的SMOTE接口\n",
    "# from imblearn.over_sampling import SMOTE\n",
    "# # 定义SMOTE模型，random_state相当于随机数种子的作用\n",
    "# oversampler=SMOTE(random_state=0)\n",
    "# x_train,y=oversampler.fit_sample(x_train,y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Balance the classes by randomly oversampling the minority class.\n",
    "from imblearn.over_sampling import RandomOverSampler\n",
    "\n",
    "oversampler = RandomOverSampler(random_state=0)\n",
    "# NOTE(review): fit_sample was renamed to fit_resample in newer imblearn\n",
    "# releases; this call targets the older API pinned in this environment.\n",
    "x_train, y = oversampler.fit_sample(x_train, y )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集的loss 0.155466\n",
      "训练集的loss 0.110604\n",
      "训练集的loss 0.088940\n",
      "训练集的loss 0.075792\n",
      "训练集的loss 0.066743\n",
      "训练集的loss 0.060075\n",
      "训练集的loss 0.054905\n",
      "训练集的loss 0.050748\n",
      "训练集的loss 0.047314\n",
      "训练集的loss 0.044416\n",
      "训练集的loss 0.041925\n",
      "训练集的loss 0.039755\n",
      "训练集的loss 0.037844\n",
      "训练集的loss 0.036143\n",
      "训练集的loss 0.034616\n",
      "训练集的loss 0.033233\n",
      "训练集的loss 0.031973\n",
      "训练集的loss 0.030815\n",
      "训练集的loss 0.029749\n",
      "训练集的loss 0.028763\n",
      "训练集的loss 0.027849\n",
      "训练集的loss 0.026997\n",
      "训练集的loss 0.026201\n",
      "训练集的loss 0.025455\n",
      "训练集的loss 0.024755\n",
      "训练集的loss 0.024095\n",
      "训练集的loss 0.023473\n",
      "训练集的loss 0.022885\n",
      "训练集的loss 0.022329\n",
      "训练集的loss 0.021801\n",
      "训练集的loss 0.021299\n",
      "训练集的loss 0.020822\n",
      "训练集的loss 0.020367\n",
      "训练集的loss 0.019933\n",
      "训练集的loss 0.019519\n",
      "训练集的loss 0.019123\n",
      "训练集的loss 0.018744\n",
      "训练集的loss 0.018380\n",
      "训练集的loss 0.018032\n",
      "训练集的loss 0.017697\n",
      "训练集的loss 0.017376\n",
      "训练集的loss 0.017066\n",
      "训练集的loss 0.016769\n",
      "训练集的loss 0.016482\n",
      "训练集的loss 0.016205\n",
      "训练集的loss 0.015939\n",
      "训练集的loss 0.015681\n",
      "训练集的loss 0.015432\n",
      "训练集的loss 0.015192\n",
      "训练集的loss 0.014962\n",
      "训练集的loss 0.014739\n",
      "训练集的loss 0.014523\n",
      "训练集的loss 0.014313\n",
      "训练集的loss 0.014108\n",
      "训练集的loss 0.013909\n",
      "训练集的loss 0.013716\n",
      "训练集的loss 0.013528\n",
      "训练集的loss 0.013346\n",
      "训练集的loss 0.013169\n",
      "训练集的loss 0.012997\n",
      "训练集的loss 0.012829\n",
      "训练集的loss 0.012666\n",
      "训练集的loss 0.012508\n",
      "训练集的loss 0.012353\n",
      "训练集的loss 0.012203\n",
      "训练集的loss 0.012056\n",
      "训练集的loss 0.011913\n",
      "训练集的loss 0.011774\n",
      "训练集的loss 0.011637\n",
      "训练集的loss 0.011505\n",
      "训练集的loss 0.011375\n",
      "训练集的loss 0.011249\n",
      "训练集的loss 0.011125\n",
      "训练集的loss 0.011004\n",
      "训练集的loss 0.010886\n",
      "训练集的loss 0.010771\n",
      "训练集的loss 0.010658\n",
      "训练集的loss 0.010547\n",
      "训练集的loss 0.010439\n",
      "训练集的loss 0.010334\n",
      "训练集的loss 0.010230\n",
      "训练集的loss 0.010129\n",
      "训练集的loss 0.010030\n",
      "训练集的loss 0.009932\n",
      "训练集的loss 0.009837\n",
      "训练集的loss 0.009744\n",
      "训练集的loss 0.009652\n",
      "训练集的loss 0.009562\n",
      "训练集的loss 0.009474\n",
      "训练集的loss 0.009388\n",
      "训练集的loss 0.009304\n",
      "训练集的loss 0.009220\n",
      "训练集的loss 0.009139\n",
      "训练集的loss 0.009058\n",
      "训练集的loss 0.008980\n",
      "训练集的loss 0.008903\n",
      "训练集的loss 0.008827\n",
      "训练集的loss 0.008752\n",
      "训练集的loss 0.008679\n",
      "训练集的loss 0.008607\n",
      "训练集的loss 0.008537\n",
      "训练集的loss 0.008467\n",
      "训练集的loss 0.008399\n",
      "训练集的loss 0.008331\n",
      "训练集的loss 0.008265\n",
      "训练集的loss 0.008201\n",
      "训练集的loss 0.008137\n",
      "训练集的loss 0.008074\n",
      "训练集的loss 0.008011\n",
      "训练集的loss 0.007951\n",
      "训练集的loss 0.007891\n",
      "训练集的loss 0.007832\n",
      "训练集的loss 0.007774\n",
      "训练集的loss 0.007716\n",
      "训练集的loss 0.007660\n",
      "训练集的loss 0.007605\n",
      "训练集的loss 0.007550\n",
      "训练集的loss 0.007496\n",
      "训练集的loss 0.007443\n",
      "训练集的loss 0.007391\n",
      "训练集的loss 0.007339\n",
      "训练集的loss 0.007288\n",
      "训练集的loss 0.007238\n",
      "训练集的loss 0.007189\n",
      "训练集的loss 0.007141\n",
      "训练集的loss 0.007093\n",
      "训练集的loss 0.007046\n",
      "训练集的loss 0.006999\n",
      "训练集的loss 0.006953\n",
      "训练集的loss 0.006908\n",
      "训练集的loss 0.006863\n",
      "训练集的loss 0.006819\n",
      "训练集的loss 0.006776\n",
      "训练集的loss 0.006733\n",
      "训练集的loss 0.006691\n",
      "训练集的loss 0.006649\n",
      "训练集的loss 0.006607\n",
      "训练集的loss 0.006567\n",
      "训练集的loss 0.006526\n",
      "训练集的loss 0.006487\n",
      "训练集的loss 0.006447\n",
      "训练集的loss 0.006409\n",
      "训练集的loss 0.006371\n",
      "训练集的loss 0.006333\n",
      "训练集的loss 0.006296\n",
      "训练集的loss 0.006259\n",
      "训练集的loss 0.006222\n",
      "训练集的loss 0.006186\n",
      "训练集的loss 0.006150\n",
      "训练集的loss 0.006115\n",
      "训练集的loss 0.006081\n",
      "训练集的loss 0.006046\n",
      "训练集的loss 0.006012\n",
      "训练集的loss 0.005978\n",
      "训练集的loss 0.005945\n",
      "训练集的loss 0.005912\n",
      "训练集的loss 0.005879\n",
      "训练集的loss 0.005847\n",
      "训练集的loss 0.005815\n",
      "训练集的loss 0.005784\n",
      "训练集的loss 0.005753\n",
      "训练集的loss 0.005722\n",
      "训练集的loss 0.005692\n",
      "训练集的loss 0.005662\n",
      "训练集的loss 0.005633\n",
      "训练集的loss 0.005603\n",
      "训练集的loss 0.005574\n",
      "训练集的loss 0.005545\n",
      "训练集的loss 0.005516\n",
      "训练集的loss 0.005488\n",
      "训练集的loss 0.005460\n",
      "训练集的loss 0.005433\n",
      "训练集的loss 0.005406\n",
      "训练集的loss 0.005379\n",
      "训练集的loss 0.005352\n",
      "训练集的loss 0.005326\n",
      "训练集的loss 0.005299\n",
      "训练集的loss 0.005274\n",
      "训练集的loss 0.005248\n",
      "训练集的loss 0.005222\n",
      "训练集的loss 0.005197\n",
      "训练集的loss 0.005172\n",
      "训练集的loss 0.005147\n",
      "训练集的loss 0.005123\n",
      "训练集的loss 0.005099\n",
      "训练集的loss 0.005075\n",
      "训练集的loss 0.005051\n",
      "训练集的loss 0.005028\n",
      "训练集的loss 0.005005\n",
      "训练集的loss 0.004982\n",
      "训练集的loss 0.004959\n",
      "训练集的loss 0.004936\n",
      "训练集的loss 0.004913\n",
      "训练集的loss 0.004891\n",
      "训练集的loss 0.004869\n",
      "训练集的loss 0.004848\n",
      "训练集的loss 0.004826\n",
      "训练集的loss 0.004805\n",
      "训练集的loss 0.004783\n",
      "训练集的loss 0.004762\n",
      "训练集的loss 0.004741\n",
      "训练集的loss 0.004721\n",
      "训练集的loss 0.004701\n",
      "训练集的loss 0.004680\n",
      "训练集的loss 0.004661\n",
      "训练集的loss 0.004641\n",
      "训练集的loss 0.004621\n",
      "训练集的loss 0.004602\n",
      "训练集的loss 0.004582\n",
      "训练集的loss 0.004563\n",
      "训练集的loss 0.004544\n",
      "训练集的loss 0.004525\n",
      "训练集的loss 0.004506\n",
      "训练集的loss 0.004487\n",
      "训练集的loss 0.004468\n",
      "训练集的loss 0.004450\n",
      "训练集的loss 0.004432\n",
      "训练集的loss 0.004414\n",
      "训练集的loss 0.004396\n",
      "训练集的loss 0.004379\n",
      "训练集的loss 0.004362\n",
      "训练集的loss 0.004344\n",
      "训练集的loss 0.004327\n",
      "训练集的loss 0.004310\n",
      "训练集的loss 0.004293\n",
      "训练集的loss 0.004277\n",
      "训练集的loss 0.004260\n",
      "训练集的loss 0.004243\n",
      "训练集的loss 0.004227\n",
      "训练集的loss 0.004211\n",
      "训练集的loss 0.004194\n",
      "训练集的loss 0.004178\n",
      "训练集的loss 0.004162\n",
      "训练集的loss 0.004147\n",
      "训练集的loss 0.004131\n",
      "训练集的loss 0.004115\n",
      "训练集的loss 0.004100\n",
      "训练集的loss 0.004084\n",
      "训练集的loss 0.004069\n",
      "训练集的loss 0.004054\n",
      "训练集的loss 0.004039\n",
      "训练集的loss 0.004024\n",
      "训练集的loss 0.004010\n",
      "训练集的loss 0.003995\n",
      "训练集的loss 0.003981\n",
      "训练集的loss 0.003967\n",
      "训练集的loss 0.003952\n",
      "训练集的loss 0.003938\n",
      "训练集的loss 0.003924\n",
      "训练集的loss 0.003910\n",
      "训练集的loss 0.003897\n",
      "训练集的loss 0.003883\n",
      "训练集的loss 0.003869\n",
      "训练集的loss 0.003856\n",
      "训练集的loss 0.003842\n",
      "训练集的loss 0.003829\n",
      "训练集的loss 0.003816\n",
      "训练集的loss 0.003802\n",
      "训练集的loss 0.003789\n",
      "训练集的loss 0.003776\n",
      "训练集的loss 0.003763\n",
      "训练集的loss 0.003750\n",
      "训练集的loss 0.003737\n",
      "训练集的loss 0.003724\n",
      "训练集的loss 0.003711\n",
      "训练集的loss 0.003699\n",
      "训练集的loss 0.003686\n",
      "训练集的loss 0.003674\n",
      "训练集的loss 0.003662\n",
      "训练集的loss 0.003650\n",
      "训练集的loss 0.003638\n",
      "训练集的loss 0.003626\n",
      "训练集的loss 0.003614\n",
      "训练集的loss 0.003603\n",
      "训练集的loss 0.003591\n",
      "训练集的loss 0.003579\n",
      "训练集的loss 0.003568\n",
      "训练集的loss 0.003556\n",
      "训练集的loss 0.003545\n",
      "训练集的loss 0.003534\n",
      "训练集的loss 0.003522\n",
      "训练集的loss 0.003511\n",
      "训练集的loss 0.003500\n",
      "训练集的loss 0.003489\n",
      "训练集的loss 0.003478\n",
      "训练集的loss 0.003468\n",
      "训练集的loss 0.003457\n",
      "训练集的loss 0.003446\n",
      "训练集的loss 0.003435\n",
      "训练集的loss 0.003425\n",
      "训练集的loss 0.003414\n",
      "训练集的loss 0.003404\n",
      "训练集的loss 0.003393\n",
      "训练集的loss 0.003383\n",
      "训练集的loss 0.003372\n",
      "训练集的loss 0.003362\n",
      "训练集的loss 0.003352\n",
      "训练集的loss 0.003342\n",
      "训练集的loss 0.003332\n",
      "训练集的loss 0.003322\n",
      "训练集的loss 0.003312\n",
      "训练集的loss 0.003302\n",
      "训练集的loss 0.003292\n",
      "训练集的loss 0.003282\n",
      "训练集的loss 0.003273\n",
      "训练集的loss 0.003263\n",
      "训练集的loss 0.003254\n",
      "训练集的loss 0.003244\n",
      "训练集的loss 0.003235\n",
      "训练集的loss 0.003225\n",
      "训练集的loss 0.003216\n",
      "训练集的loss 0.003207\n",
      "训练集的loss 0.003198\n",
      "训练集的loss 0.003189\n",
      "训练集的loss 0.003179\n",
      "训练集的loss 0.003170\n",
      "训练集的loss 0.003161\n",
      "训练集的loss 0.003152\n",
      "训练集的loss 0.003143\n",
      "训练集的loss 0.003134\n",
      "训练集的loss 0.003125\n",
      "训练集的loss 0.003116\n",
      "训练集的loss 0.003108\n",
      "训练集的loss 0.003099\n",
      "训练集的loss 0.003090\n",
      "训练集的loss 0.003081\n",
      "训练集的loss 0.003073\n",
      "训练集的loss 0.003064\n",
      "训练集的loss 0.003055\n",
      "训练集的loss 0.003047\n",
      "训练集的loss 0.003039\n",
      "训练集的loss 0.003030\n",
      "训练集的loss 0.003022\n",
      "训练集的loss 0.003013\n",
      "训练集的loss 0.003005\n",
      "训练集的loss 0.002997\n",
      "训练集的loss 0.002989\n",
      "训练集的loss 0.002982\n",
      "训练集的loss 0.002974\n",
      "训练集的loss 0.002966\n",
      "训练集的loss 0.002958\n",
      "训练集的loss 0.002950\n",
      "训练集的loss 0.002943\n",
      "训练集的loss 0.002935\n",
      "训练集的loss 0.002927\n",
      "训练集的loss 0.002920\n",
      "训练集的loss 0.002912\n",
      "训练集的loss 0.002905\n",
      "训练集的loss 0.002897\n",
      "训练集的loss 0.002890\n",
      "训练集的loss 0.002882\n",
      "训练集的loss 0.002875\n",
      "训练集的loss 0.002867\n",
      "训练集的loss 0.002860\n",
      "训练集的loss 0.002853\n",
      "训练集的loss 0.002845\n",
      "训练集的loss 0.002838\n",
      "训练集的loss 0.002831\n",
      "训练集的loss 0.002824\n",
      "训练集的loss 0.002816\n",
      "训练集的loss 0.002809\n",
      "训练集的loss 0.002802\n",
      "训练集的loss 0.002795\n",
      "训练集的loss 0.002788\n",
      "训练集的loss 0.002781\n",
      "训练集的loss 0.002774\n",
      "训练集的loss 0.002767\n",
      "训练集的loss 0.002761\n",
      "训练集的loss 0.002754\n",
      "训练集的loss 0.002747\n",
      "训练集的loss 0.002741\n",
      "训练集的loss 0.002734\n",
      "训练集的loss 0.002728\n",
      "训练集的loss 0.002721\n",
      "训练集的loss 0.002714\n",
      "训练集的loss 0.002708\n",
      "训练集的loss 0.002701\n",
      "训练集的loss 0.002695\n",
      "训练集的loss 0.002688\n",
      "训练集的loss 0.002682\n",
      "训练集的loss 0.002675\n",
      "训练集的loss 0.002669\n",
      "训练集的loss 0.002663\n",
      "训练集的loss 0.002656\n",
      "训练集的loss 0.002650\n",
      "训练集的loss 0.002644\n",
      "训练集的loss 0.002638\n",
      "训练集的loss 0.002632\n",
      "训练集的loss 0.002626\n",
      "训练集的loss 0.002620\n",
      "训练集的loss 0.002614\n",
      "训练集的loss 0.002608\n",
      "训练集的loss 0.002602\n",
      "训练集的loss 0.002596\n",
      "训练集的loss 0.002590\n",
      "训练集的loss 0.002584\n",
      "训练集的loss 0.002578\n",
      "训练集的loss 0.002572\n",
      "训练集的loss 0.002566\n",
      "训练集的loss 0.002560\n",
      "训练集的loss 0.002555\n",
      "训练集的loss 0.002549\n",
      "训练集的loss 0.002543\n",
      "训练集的loss 0.002537\n",
      "训练集的loss 0.002531\n",
      "训练集的loss 0.002526\n",
      "训练集的loss 0.002520\n",
      "训练集的loss 0.002514\n",
      "训练集的loss 0.002509\n",
      "训练集的loss 0.002503\n",
      "训练集的loss 0.002497\n",
      "训练集的loss 0.002492\n",
      "训练集的loss 0.002486\n",
      "训练集的loss 0.002481\n",
      "训练集的loss 0.002475\n",
      "训练集的loss 0.002470\n",
      "训练集的loss 0.002464\n",
      "训练集的loss 0.002459\n",
      "训练集的loss 0.002454\n",
      "训练集的loss 0.002448\n",
      "训练集的loss 0.002443\n",
      "训练集的loss 0.002437\n",
      "训练集的loss 0.002432\n",
      "训练集的loss 0.002427\n",
      "训练集的loss 0.002421\n",
      "训练集的loss 0.002416\n",
      "训练集的loss 0.002411\n",
      "训练集的loss 0.002406\n",
      "训练集的loss 0.002401\n",
      "训练集的loss 0.002396\n",
      "训练集的loss 0.002390\n",
      "训练集的loss 0.002385\n",
      "训练集的loss 0.002380\n",
      "训练集的loss 0.002375\n",
      "训练集的loss 0.002370\n",
      "训练集的loss 0.002365\n",
      "训练集的loss 0.002360\n",
      "训练集的loss 0.002355\n",
      "训练集的loss 0.002350\n",
      "训练集的loss 0.002345\n",
      "训练集的loss 0.002340\n",
      "训练集的loss 0.002335\n",
      "训练集的loss 0.002330\n",
      "训练集的loss 0.002325\n",
      "训练集的loss 0.002321\n",
      "训练集的loss 0.002316\n",
      "训练集的loss 0.002311\n",
      "训练集的loss 0.002306\n",
      "训练集的loss 0.002302\n",
      "训练集的loss 0.002297\n",
      "训练集的loss 0.002292\n",
      "训练集的loss 0.002288\n",
      "训练集的loss 0.002283\n",
      "训练集的loss 0.002279\n",
      "训练集的loss 0.002274\n",
      "训练集的loss 0.002270\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集的loss 0.002266\n",
      "训练集的loss 0.002261\n",
      "训练集的loss 0.002257\n",
      "训练集的loss 0.002253\n",
      "训练集的loss 0.002248\n",
      "训练集的loss 0.002244\n",
      "训练集的loss 0.002240\n",
      "训练集的loss 0.002235\n",
      "训练集的loss 0.002231\n",
      "训练集的loss 0.002227\n",
      "训练集的loss 0.002223\n",
      "训练集的loss 0.002218\n",
      "训练集的loss 0.002214\n",
      "训练集的loss 0.002210\n",
      "训练集的loss 0.002206\n",
      "训练集的loss 0.002201\n",
      "训练集的loss 0.002197\n",
      "训练集的loss 0.002193\n",
      "训练集的loss 0.002189\n",
      "训练集的loss 0.002185\n",
      "训练集的loss 0.002180\n",
      "训练集的loss 0.002176\n",
      "训练集的loss 0.002172\n",
      "训练集的loss 0.002168\n",
      "训练集的loss 0.002164\n",
      "训练集的loss 0.002160\n",
      "训练集的loss 0.002156\n",
      "训练集的loss 0.002152\n",
      "训练集的loss 0.002148\n",
      "训练集的loss 0.002144\n",
      "训练集的loss 0.002139\n",
      "训练集的loss 0.002135\n",
      "训练集的loss 0.002131\n",
      "训练集的loss 0.002127\n",
      "训练集的loss 0.002123\n",
      "训练集的loss 0.002119\n",
      "训练集的loss 0.002115\n",
      "训练集的loss 0.002111\n",
      "训练集的loss 0.002108\n",
      "训练集的loss 0.002104\n",
      "训练集的loss 0.002100\n",
      "训练集的loss 0.002096\n",
      "训练集的loss 0.002092\n",
      "训练集的loss 0.002088\n",
      "训练集的loss 0.002084\n",
      "训练集的loss 0.002080\n",
      "训练集的loss 0.002076\n",
      "训练集的loss 0.002072\n",
      "训练集的loss 0.002069\n",
      "训练集的loss 0.002065\n",
      "训练集的loss 0.002061\n",
      "训练集的loss 0.002057\n",
      "训练集的loss 0.002053\n",
      "训练集的loss 0.002049\n",
      "训练集的loss 0.002046\n",
      "训练集的loss 0.002042\n",
      "训练集的loss 0.002038\n",
      "训练集的loss 0.002034\n",
      "训练集的loss 0.002031\n",
      "训练集的loss 0.002027\n",
      "训练集的loss 0.002023\n",
      "训练集的loss 0.002019\n",
      "训练集的loss 0.002016\n",
      "训练集的loss 0.002012\n",
      "训练集的loss 0.002008\n",
      "训练集的loss 0.002005\n",
      "训练集的loss 0.002001\n",
      "训练集的loss 0.001997\n",
      "训练集的loss 0.001994\n",
      "训练集的loss 0.001990\n",
      "训练集的loss 0.001986\n",
      "训练集的loss 0.001983\n",
      "训练集的loss 0.001979\n",
      "训练集的loss 0.001976\n",
      "训练集的loss 0.001972\n",
      "训练集的loss 0.001969\n",
      "训练集的loss 0.001965\n",
      "训练集的loss 0.001962\n",
      "训练集的loss 0.001958\n",
      "训练集的loss 0.001955\n",
      "训练集的loss 0.001952\n",
      "训练集的loss 0.001949\n",
      "训练集的loss 0.001945\n",
      "训练集的loss 0.001942\n",
      "训练集的loss 0.001939\n",
      "训练集的loss 0.001936\n",
      "训练集的loss 0.001933\n",
      "训练集的loss 0.001929\n",
      "训练集的loss 0.001926\n",
      "训练集的loss 0.001923\n",
      "训练集的loss 0.001920\n",
      "训练集的loss 0.001917\n",
      "训练集的loss 0.001914\n",
      "训练集的loss 0.001910\n",
      "训练集的loss 0.001907\n",
      "训练集的loss 0.001904\n",
      "训练集的loss 0.001901\n",
      "训练集的loss 0.001898\n",
      "训练集的loss 0.001895\n",
      "训练集的loss 0.001892\n",
      "训练集的loss 0.001889\n",
      "训练集的loss 0.001885\n",
      "训练集的loss 0.001882\n",
      "训练集的loss 0.001879\n",
      "训练集的loss 0.001876\n",
      "训练集的loss 0.001873\n",
      "训练集的loss 0.001870\n",
      "训练集的loss 0.001867\n",
      "训练集的loss 0.001864\n",
      "训练集的loss 0.001861\n",
      "训练集的loss 0.001858\n",
      "训练集的loss 0.001855\n",
      "训练集的loss 0.001852\n",
      "训练集的loss 0.001849\n",
      "训练集的loss 0.001846\n",
      "训练集的loss 0.001843\n",
      "训练集的loss 0.001840\n",
      "训练集的loss 0.001837\n",
      "训练集的loss 0.001834\n",
      "训练集的loss 0.001831\n",
      "训练集的loss 0.001828\n",
      "训练集的loss 0.001825\n",
      "训练集的loss 0.001822\n",
      "训练集的loss 0.001819\n",
      "训练集的loss 0.001816\n",
      "训练集的loss 0.001813\n",
      "训练集的loss 0.001810\n",
      "训练集的loss 0.001807\n",
      "训练集的loss 0.001805\n",
      "训练集的loss 0.001802\n",
      "训练集的loss 0.001799\n",
      "训练集的loss 0.001796\n",
      "训练集的loss 0.001793\n",
      "训练集的loss 0.001790\n",
      "训练集的loss 0.001787\n",
      "训练集的loss 0.001784\n",
      "训练集的loss 0.001782\n",
      "训练集的loss 0.001779\n",
      "训练集的loss 0.001776\n",
      "训练集的loss 0.001773\n",
      "训练集的loss 0.001771\n",
      "训练集的loss 0.001768\n",
      "训练集的loss 0.001765\n",
      "训练集的loss 0.001763\n"
     ]
    }
   ],
   "source": [
    "# Logistic regression in TensorFlow 1.x: a single linear layer trained\n",
    "# with sigmoid cross-entropy and the FTRL optimizer (L1 regularization\n",
    "# encourages sparse weights).\n",
    "x_ = tf.placeholder(tf.float32, [None, x_train.shape[1]])\n",
    "y_ = tf.placeholder(tf.float32,shape=(None,1))\n",
    "# Weight vector and bias for the linear model.\n",
    "w = tf.Variable(tf.truncated_normal([x_train.shape[1],1],stddev=0.1))\n",
    "b = tf.Variable(tf.constant(0.1, shape=[1]))\n",
    "\n",
    "logits_ = tf.matmul(x_,w)+b\n",
    "\n",
    "\n",
    "## Cross-entropy (log) loss on the logits.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_, labels=y_))\n",
    "y_val=tf.nn.sigmoid(logits_)\n",
    "\n",
    "# Build the training step using the FTRL optimizer.\n",
    "train_step = tf.train.FtrlOptimizer(learning_rate=0.01,l1_regularization_strength=1,l2_regularization_strength=0).minimize(cross_entropy)\n",
    "#train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "\n",
    "# Train: full-batch updates; logs the training loss every 1000 steps.\n",
    "# NOTE(review): 600k full-batch iterations is very expensive; consider\n",
    "# stopping early once the loss plateaus.\n",
    "for i in range(600000):\n",
    "    #example_batch, label_batch = tf.train.shuffle_batch([example,label], batch_size=1, capacity=200, min_after_dequeue=100, num_threads=2)\n",
    "    #example_batch,label_batch = tf.train.batch([X_train,y_train.reshape(-1,1)],batch_size = 1,capacity=10)\n",
    "    #example, l = sess.run([example_batch,label_batch])\n",
    "    sess.run(train_step,feed_dict={x_:x_train, y_: y.reshape(-1,1)})\n",
    "    if (i+1) % 1000 == 0:\n",
    "        train_logloss = sess.run(cross_entropy,feed_dict={x_:x_train, y_: y.reshape(-1,1)})\n",
    "#         test_logloss = sess.run(cross_entropy, feed_dict={x_:X_test, y_: y_test.reshape(-1,1)})\n",
    "        print('训练集的loss %f'%(train_logloss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predicted probabilities for the test set from the trained model.\n",
    "output2 = sess.run([y_val],feed_dict={x_:x_test})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 208,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the predictions in a DataFrame keyed by test sample id.\n",
    "TEST=pd.DataFrame(output2[0],index=allTdata.index,columns=['probability'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 209,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>probability</th>\n",
       "      <th>result</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>5.000000e+02</td>\n",
       "      <td>500.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>6.457423e-02</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>2.295136e-01</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>6.609175e-10</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>1.784060e-06</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>2.814859e-05</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>6.038292e-04</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>1.000000e+00</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        probability  result\n",
       "count  5.000000e+02   500.0\n",
       "mean   6.457423e-02     0.0\n",
       "std    2.295136e-01     0.0\n",
       "min    6.609175e-10     0.0\n",
       "25%    1.784060e-06     0.0\n",
       "50%    2.814859e-05     0.0\n",
       "75%    6.038292e-04     0.0\n",
       "max    1.000000e+00     0.0"
      ]
     },
     "execution_count": 209,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "TEST['result']=0\n",
    "TEST.describe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {},
   "outputs": [],
   "source": [
     "tread=TEST['probability'].quantile(0.938)  # NOTE: 'tread' appears to be a typo for 'threshold'; name kept because later cells reference it\n",
    "index1=TEST[TEST['probability']>tread].index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 211,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.5189866422414741"
      ]
     },
     "execution_count": 211,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tread"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 212,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "31"
      ]
     },
     "execution_count": 212,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Count of test rows above the threshold (original note: '标准1320个' ~ 'standard: 1320 items' -- presumably the count on the full dataset; TODO confirm)\n",
    "len(index1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 213,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>probability</th>\n",
       "      <th>result</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>5.000000e+02</td>\n",
       "      <td>500.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>6.457423e-02</td>\n",
       "      <td>0.062000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>2.295136e-01</td>\n",
       "      <td>0.241397</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>6.609175e-10</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>1.784060e-06</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>2.814859e-05</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>6.038292e-04</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>1.000000e+00</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        probability      result\n",
       "count  5.000000e+02  500.000000\n",
       "mean   6.457423e-02    0.062000\n",
       "std    2.295136e-01    0.241397\n",
       "min    6.609175e-10    0.000000\n",
       "25%    1.784060e-06    0.000000\n",
       "50%    2.814859e-05    0.000000\n",
       "75%    6.038292e-04    0.000000\n",
       "max    1.000000e+00    1.000000"
      ]
     },
     "execution_count": 213,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "TEST.loc[index1,'result']=1\n",
    "TEST.describe()\n",
    "# TEST=TEST.drop('GBDT_FTRL',axis=1)\n",
    "# result=pd.DataFrame(y_predict[0],index=xgb_onehotDfforTest.index,columns=['GBDT_FTRL'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 214,
   "metadata": {},
   "outputs": [],
   "source": [
    "TEST=TEST.drop('probability',axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 215,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TEST.columns=['GBDT_FTRL']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 216,
   "metadata": {},
   "outputs": [],
   "source": [
    "TEST.to_csv('newResult.csv',index_label='idx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 217,
   "metadata": {},
   "outputs": [],
   "source": [
    "one=TEST[TEST.result==1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 218,
   "metadata": {},
   "outputs": [],
   "source": [
    "one.to_csv(\"one5.9.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 219,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>result</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>361369d3-b7e1-4c73-beb1-a5fbe7efa4d0</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>54f471ef-31ec-4c47-aed0-f8862e6eceac</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7b4c40d8-48ae-42c7-9696-184d77bbcda2</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7bd7c435-6fb5-4acc-8176-62ccc9ee3a18</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7e5f95aa-283f-4a16-9c68-167347067d20</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8b80fed7-95bd-4103-bbc0-526df7f4e738</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9e97c06d-b17f-46b8-a6dc-e76e58a475b7</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9fba40a9-cf3f-44ca-bbc3-9f8143934b1a</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>a08a7814-b140-440c-9959-fce10ab8b862</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>a28bc5dc-e3f1-4395-8de2-ee3e126880c2</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>a5853b12-06bc-4648-bc9c-3a8ca48eb858</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>abe7667e-660d-43ab-bb99-8b97b96e3cf4</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ac45a077-4951-400a-91f0-e5e49dec0f9d</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>af480586-2e81-4c46-9a74-c95685afedf7</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>b133e35e-adaf-403b-aa43-d9159476d03e</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>b5bdf8aa-300f-430a-87ce-025a55f31372</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>b6395523-5708-43ad-8c3c-3de952cb636f</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>b703415a-8b0f-45c5-a242-50b86c5c79f8</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>c041ab07-fe4f-4982-8e2e-a54ba65f70fe</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>c06d8bfc-0daa-4982-a36f-e0cc31dfc545</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>c971e373-17ea-4263-955e-6d25e1e7847b</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>cb509db4-d018-4521-a50e-644eb81a8937</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>d0675a48-5829-40b0-ae53-d206bac69967</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>d18ca714-2c4f-4bbd-a10c-f7f5dff8974a</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>d76dea2e-df86-415f-a771-878aac11393b</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>e209cb45-5588-4661-8ea0-63066fb4a2de</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>e7a35fc8-babc-4f03-aa51-674260ea9798</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>e8540e54-6164-40bd-85c6-ee4d31fba796</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>e96489f7-50f3-4a72-a287-baba90c55f95</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>ee7ee67f-4d64-4157-b64b-75ede2b8e54d</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>f035c99a-4812-4175-9764-bafb50fb5917</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                      result\n",
       "361369d3-b7e1-4c73-beb1-a5fbe7efa4d0       1\n",
       "54f471ef-31ec-4c47-aed0-f8862e6eceac       1\n",
       "7b4c40d8-48ae-42c7-9696-184d77bbcda2       1\n",
       "7bd7c435-6fb5-4acc-8176-62ccc9ee3a18       1\n",
       "7e5f95aa-283f-4a16-9c68-167347067d20       1\n",
       "8b80fed7-95bd-4103-bbc0-526df7f4e738       1\n",
       "9e97c06d-b17f-46b8-a6dc-e76e58a475b7       1\n",
       "9fba40a9-cf3f-44ca-bbc3-9f8143934b1a       1\n",
       "a08a7814-b140-440c-9959-fce10ab8b862       1\n",
       "a28bc5dc-e3f1-4395-8de2-ee3e126880c2       1\n",
       "a5853b12-06bc-4648-bc9c-3a8ca48eb858       1\n",
       "abe7667e-660d-43ab-bb99-8b97b96e3cf4       1\n",
       "ac45a077-4951-400a-91f0-e5e49dec0f9d       1\n",
       "af480586-2e81-4c46-9a74-c95685afedf7       1\n",
       "b133e35e-adaf-403b-aa43-d9159476d03e       1\n",
       "b5bdf8aa-300f-430a-87ce-025a55f31372       1\n",
       "b6395523-5708-43ad-8c3c-3de952cb636f       1\n",
       "b703415a-8b0f-45c5-a242-50b86c5c79f8       1\n",
       "c041ab07-fe4f-4982-8e2e-a54ba65f70fe       1\n",
       "c06d8bfc-0daa-4982-a36f-e0cc31dfc545       1\n",
       "c971e373-17ea-4263-955e-6d25e1e7847b       1\n",
       "cb509db4-d018-4521-a50e-644eb81a8937       1\n",
       "d0675a48-5829-40b0-ae53-d206bac69967       1\n",
       "d18ca714-2c4f-4bbd-a10c-f7f5dff8974a       1\n",
       "d76dea2e-df86-415f-a771-878aac11393b       1\n",
       "e209cb45-5588-4661-8ea0-63066fb4a2de       1\n",
       "e7a35fc8-babc-4f03-aa51-674260ea9798       1\n",
       "e8540e54-6164-40bd-85c6-ee4d31fba796       1\n",
       "e96489f7-50f3-4a72-a287-baba90c55f95       1\n",
       "ee7ee67f-4d64-4157-b64b-75ede2b8e54d       1\n",
       "f035c99a-4812-4175-9764-bafb50fb5917       1"
      ]
     },
     "execution_count": 219,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "one"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
