{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import jieba\n",
    "import numpy as np\n",
    "from time import time\n",
    "from tqdm import tqdm\n",
    "from sklearn import metrics\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "def loadfile(filepath):\n",
    "    '''加载文件内容和标签'''\n",
    "    categories = os.listdir(filepath)\n",
    "    content = []\n",
    "    label = []\n",
    "    num_label = []\n",
    "    num_to_label = {}\n",
    "    for i, category in tqdm(enumerate(categories)):\n",
    "        category_path = os.path.join(filepath, category)\n",
    "        filenames = os.listdir(category_path)\n",
    "        for fname in filenames:\n",
    "            fpath = os.path.join(category_path, fname)\n",
    "            with open(fpath, encoding='utf-8') as f:\n",
    "                sentences = f.readlines()\n",
    "                sent_words = [list(jieba.cut(sent0.strip('\\n').strip())) for sent0 in sentences]\n",
    "                document = \" \".join(\" \".join(sent0) for sent0 in sent_words)\n",
    "                content.append(document)\n",
    "                # label.append(category)\n",
    "\n",
    "        # content.extend(cat_docs)\n",
    "        label.extend([category] * len(filenames))\n",
    "        num_label.extend([i] * len(filenames))\n",
    "        num_to_label.setdefault(i, category)\n",
    "    return content, label, num_label, num_to_label"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\86131\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 1.408 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "10it [15:08, 90.80s/it] "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "execute time: 908.0324215888977 s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
     "start = time()\n",
     "# Very slow: walks the whole corpus and jieba-segments every file (~15 min)\n",
     "# NOTE(review): hardcoded absolute local path — consider a configurable DATA_DIR\n",
     "corpus_root_path = 'D:/大三下/NLP_Foundation/datasets/thucnews'\n",
     "content, label, num_label, num_to_label = loadfile(corpus_root_path)\n",
     "\n",
     "end = time()\n",
     "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "65000\n",
      "65000\n"
     ]
    }
   ],
   "source": [
    "print(len(content))\n",
    "print(len(label))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "outputs": [],
   "source": [
    "import pickle\n",
    "\n",
    "# 保存数据\n",
    "res_root_path = '../../docs/text_classification/thucnews_result/'\n",
    "with open(res_root_path + 'content.pickle', 'wb') as file:\n",
    "    pickle.dump(content, file)\n",
    "\n",
    "with open(res_root_path + 'label.pickle', 'wb') as file:\n",
    "    pickle.dump(label, file)\n",
    "\n",
    "with open(res_root_path + 'num_label.pickle', 'wb') as file:\n",
    "    pickle.dump(num_label, file)\n",
    "\n",
    "with open(res_root_path + 'num_to_label.pickle', 'wb') as file:\n",
    "    pickle.dump(num_to_label, file)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "# 读取数据\n",
    "import pickle\n",
    "\n",
    "res_root_path = '../../docs/text_classification/thucnews_result/'\n",
    "with open(res_root_path + 'content.pickle', 'rb') as file:\n",
    "    content = pickle.load(file)\n",
    "\n",
    "with open(res_root_path + 'label.pickle', 'rb') as file:\n",
    "    label = pickle.load(file)\n",
    "\n",
    "with open(res_root_path + 'num_label.pickle', 'rb') as file:\n",
    "    num_label = pickle.load(file)\n",
    "\n",
    "with open(res_root_path + 'num_to_label.pickle', 'rb') as file:\n",
    "    num_to_label = pickle.load(file)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "# 加载停用词\n",
    "stopwords_path = '../../docs/text_classification/stopwords/cn_stopwords.txt'\n",
    "with open(stopwords_path, encoding='utf-8') as file:\n",
    "    stopwords = file.read().split(\"\\n\")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Documents\\anaconda3\\envs\\ml\\lib\\site-packages\\sklearn\\feature_extraction\\text.py:401: UserWarning: Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens ['若果'] not in stop_words.\n",
      "  % sorted(inconsistent)\n"
     ]
    }
   ],
   "source": [
     "from sklearn.feature_extraction.text import TfidfVectorizer\n",
     "\n",
     "# TfidfVectorizer turns the raw (pre-segmented, space-joined) text into a\n",
     "# TF-IDF feature matrix — the basis for similarity, topic models (e.g. LSI),\n",
     "# search ranking and the classifiers below.\n",
     "tfidf = TfidfVectorizer(stop_words=stopwords)\n",
     "# fit_transform takes a list of documents; returns scipy.sparse.csr_matrix\n",
     "X = tfidf.fit_transform(content)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(65000, 375785)\n"
     ]
    }
   ],
   "source": [
     "# (n_documents, vocabulary_size)\n",
     "print(X.shape)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "X_train, X_test, y_train, y_test = train_test_split(X, label, random_state=41)\n",
    "y_num_train, y_num_test = train_test_split(num_label, random_state=41)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{0: '体育', 1: '娱乐', 2: '家居', 3: '房产', 4: '教育', 5: '时尚', 6: '时政', 7: '游戏', 8: '科技', 9: '财经'}\n"
     ]
    },
    {
     "data": {
      "text/plain": "(['时政', '游戏', '时尚', '房产', '娱乐'], [6, 7, 5, 3, 1])"
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Spot-check that name labels and numeric labels line up after the split\n",
     "print(num_to_label)\n",
     "y_train[:5], y_num_train[:5]"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [],
   "source": [
    "def run_assess(model_name, predict_test, y_test):\n",
    "    print(model_name + \"文本分类的准确率为：\", metrics.accuracy_score(predict_test, y_test))\n",
    "    print(model_name + \"文本分类的精度率为：\", metrics.precision_score(predict_test, y_test, average='weighted'))\n",
    "    print(model_name + \"文本分类的召回率为：\", metrics.recall_score(predict_test, y_test, average='weighted'))\n",
    "    print(model_name + \"文本分类的F1值为：\", metrics.f1_score(predict_test, y_test, average='weighted'))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "多项式朴素贝叶斯文本分类的准确率为： 0.9518153846153846\n",
      "多项式朴素贝叶斯文本分类的精度率为： 0.9524158302431509\n",
      "多项式朴素贝叶斯文本分类的召回率为： 0.9518153846153846\n",
      "多项式朴素贝叶斯文本分类的F1值为： 0.9518469520589389\n"
     ]
    }
   ],
   "source": [
    "#多项式朴素贝叶斯\n",
    "nb_model = MultinomialNB(alpha=0.001)\n",
    "nb_model.fit(X_train, y_train)\n",
    "nb_predict = nb_model.predict(X_test)\n",
    "\n",
    "model_name = \"多项式朴素贝叶斯\"\n",
    "run_assess(model_name, nb_predict, y_test)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bernoulli贝叶斯文本分类的准确率为： 0.9216\n",
      "bernoulli贝叶斯文本分类的精度率为： 0.9224621005187448\n",
      "bernoulli贝叶斯文本分类的召回率为： 0.9216\n",
      "bernoulli贝叶斯文本分类的F1值为： 0.9216402632186084\n"
     ]
    }
   ],
   "source": [
    "#bernoulli朴素贝叶斯\n",
    "from sklearn.naive_bayes import BernoulliNB\n",
    "\n",
    "ber_model = BernoulliNB(alpha=0.001)\n",
    "ber_model.fit(X_train, y_train)\n",
    "ber_predict = ber_model.predict(X_test)\n",
    "\n",
    "model_name = \"bernoulli贝叶斯\"\n",
    "run_assess(model_name, ber_predict, y_test)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "complement贝叶斯文本分类的准确率为： 0.9506461538461538\n",
      "complement贝叶斯文本分类的精度率为： 0.9517729107945467\n",
      "complement贝叶斯文本分类的召回率为： 0.9506461538461538\n",
      "complement贝叶斯文本分类的F1值为： 0.9508424085536041\n"
     ]
    }
   ],
   "source": [
    "# Complement朴素贝叶斯\n",
    "from sklearn.naive_bayes import ComplementNB\n",
    "\n",
    "com_model = ComplementNB(alpha=0.001)\n",
    "com_model.fit(X_train, y_train)\n",
    "com_predict = com_model.predict(X_test)\n",
    "\n",
    "model_name = \"complement贝叶斯\"\n",
    "run_assess(model_name, com_predict, y_test)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "决策树文本分类的准确率为： 0.900676923076923\n",
      "决策树文本分类的精度率为： 0.9003102642598887\n",
      "决策树文本分类的召回率为： 0.900676923076923\n",
      "决策树文本分类的F1值为： 0.9004476976007371\n",
      "execute time: 236.16783499717712 s\n"
     ]
    }
   ],
   "source": [
    "# 决策树\n",
    "start = time()\n",
    "\n",
    "from sklearn import tree\n",
    "\n",
    "dtc = tree.DecisionTreeClassifier()\n",
    "dtc = dtc.fit(X_train, y_train)\n",
    "dtc_predict = dtc.predict(X_test)\n",
    "\n",
    "model_name = \"决策树\"\n",
    "run_assess(model_name, dtc_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最近邻文本分类的准确率为： 0.9152615384615385\n",
      "最近邻文本分类的精度率为： 0.9162698394880093\n",
      "最近邻文本分类的召回率为： 0.9152615384615385\n",
      "最近邻文本分类的F1值为： 0.915424521033861\n",
      "execute time: 89.27723741531372 s\n"
     ]
    }
   ],
   "source": [
    "# KNN\n",
    "start = time()\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "knn = KNeighborsClassifier(n_neighbors=3)\n",
    "knn = knn.fit(X_train, y_train)\n",
    "knn_predict = knn.predict(X_test)\n",
    "\n",
    "model_name = \"最近邻\"\n",
    "run_assess(model_name, knn_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Documents\\anaconda3\\envs\\ml\\lib\\site-packages\\sklearn\\neural_network\\_multilayer_perceptron.py:549: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "  self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "多层感知机文本分类的准确率为： 0.9651076923076923\n",
      "多层感知机文本分类的精度率为： 0.9651190875244321\n",
      "多层感知机文本分类的召回率为： 0.9651076923076923\n",
      "多层感知机文本分类的F1值为： 0.965084837582065\n",
      "execute time: 547.9201447963715 s\n"
     ]
    }
   ],
   "source": [
    "# 神经网络多层感知机\n",
    "start = time()\n",
    "\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "\n",
    "mlpc = MLPClassifier(alpha=1e-05, hidden_layer_sizes=(15,), random_state=1,\n",
    "                     solver='lbfgs')\n",
    "mlpc.fit(X_train, y_train)\n",
    "mlpc_predict = mlpc.predict(X_test)\n",
    "\n",
    "model_name = \"多层感知机\"\n",
    "run_assess(model_name, mlpc_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "随机森林文本分类的准确率为： 0.8959384615384616\n",
      "随机森林文本分类的精度率为： 0.8977806307859009\n",
      "随机森林文本分类的召回率为： 0.8959384615384616\n",
      "随机森林文本分类的F1值为： 0.8959088575804269\n",
      "execute time: 120.49425840377808 s\n"
     ]
    }
   ],
   "source": [
    "# 随机森林\n",
    "start = time()\n",
    "\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "\n",
    "rfc = RandomForestClassifier(n_estimators=10).fit(X_train, y_train)\n",
    "rfc_predict = rfc.predict(X_test)\n",
    "\n",
    "model_name = \"随机森林\"\n",
    "run_assess(model_name, rfc_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Documents\\anaconda3\\envs\\ml\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LogisticRegression文本分类的准确率为： 0.9635692307692307\n",
      "LogisticRegression文本分类的精度率为： 0.9638134864726087\n",
      "LogisticRegression文本分类的召回率为： 0.9635692307692307\n",
      "LogisticRegression文本分类的F1值为： 0.9635682118274916\n",
      "execute time: 391.5519254207611 s\n"
     ]
    }
   ],
   "source": [
    "# LogisticRegression\n",
    "start = time()\n",
    "\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "log_model = LogisticRegression().fit(X_train, y_train)\n",
    "log_predict = log_model.predict(X_test)\n",
    "\n",
    "model_name = \"LogisticRegression\"\n",
    "run_assess(model_name, log_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LightGBM文本分类的准确率为： 0.9731692307692308\n",
      "LightGBM文本分类的精度率为： 0.9733056289365806\n",
      "LightGBM文本分类的召回率为： 0.9731692307692308\n",
      "LightGBM文本分类的F1值为： 0.9731376284840774\n",
      "execute time: 816.7429533004761 s\n"
     ]
    }
   ],
   "source": [
    "# LGBMClassifier\n",
    "start = time()\n",
    "\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.multiclass import OneVsRestClassifier\n",
    "\n",
    "clf_multilabel = OneVsRestClassifier(LGBMClassifier())\n",
    "\n",
    "clf_multilabel.fit(X_train, y_train)\n",
    "lgbm_predict = clf_multilabel.predict(X_test)\n",
    "\n",
    "model_name = \"LightGBM\"\n",
    "run_assess(model_name, lgbm_predict, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Documents\\anaconda3\\envs\\ml\\lib\\site-packages\\xgboost\\sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n",
      "  warnings.warn(label_encoder_deprecation_msg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12:33:43] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n",
      "xgboost文本分类的准确率为： 0.9692923076923077\n",
      "xgboost文本分类的精度率为： 0.969366073096577\n",
      "xgboost文本分类的召回率为： 0.9692923076923077\n",
      "xgboost文本分类的F1值为： 0.9692471585484635\n",
      "execute time: 639.9266865253448 s\n"
     ]
    }
   ],
   "source": [
    "# xgboost\n",
    "start = time()\n",
    "\n",
    "import xgboost as xgb\n",
    "\n",
    "xgb_clf = xgb.XGBClassifier().fit(X_train, y_train)\n",
    "\n",
    "xgb_clf_pred = xgb_clf.predict(X_test)\n",
    "\n",
    "model_name = \"xgboost\"\n",
    "run_assess(model_name, xgb_clf_pred, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVM文本分类的准确率为： 0.9643076923076923\n",
      "SVM文本分类的精度率为： 0.9645288122160475\n",
      "SVM文本分类的召回率为： 0.9643076923076923\n",
      "SVM文本分类的F1值为： 0.9642774958876801\n",
      "execute time: 5209.842105388641 s\n"
     ]
    }
   ],
   "source": [
    "#  svm\n",
    "start = time()\n",
    "\n",
    "from sklearn import svm\n",
    "\n",
    "svm_clf = svm.SVC(decision_function_shape='ovo').fit(X_train, y_train)\n",
    "svm_pred = svm_clf.predict(X_test)\n",
    "\n",
    "model_name = \"SVM\"\n",
    "run_assess(model_name, svm_pred, y_test)\n",
    "\n",
    "end = time()\n",
    "print(\"execute time: {} s\".format(end - start))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# CNN model — see reference implementation:\n",
     "# https://github.com/gaussic/text-classification-cnn-rnn"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "conda-env-ml-py",
   "language": "python",
   "display_name": "Python [conda env:ml] *"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}