{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from joblib import Parallel, delayed\n",
    "import time\n",
    "import numpy as np\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "from sklearn.decomposition import PCA,TruncatedSVD\n",
    "from sklearn import manifold\n",
    "from itertools import combinations,permutations\n",
    "from sklearn import preprocessing\n",
    "import math\n",
    "import tsfresh as tsf\n",
    "from tsfresh import extract_features, select_features\n",
    "from tsfresh.utilities.dataframe_functions import impute\n",
    "from tsfresh.feature_extraction import ComprehensiveFCParameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the raw train/test tables from HDF5 (point-level records keyed by 'ship')\n",
    "train_all_data = pd.read_hdf('data/train.h5')\n",
    "test_all_data = pd.read_hdf('data/test.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all_data[\"type\"] = train_all_data[\"type\"].map({'围网':0,'刺网':1,'拖网':2})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 数据清洗 (Data cleaning)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data cleaning: ships whose track bounding box has zero area never moved,\n",
    "# so any non-zero recorded speed for them is noise — force v to 0.\n",
    "def get_areas(dataframe):\n",
    "    \"\"\"Return a Series, indexed by ship id, of each ship's bounding-box area.\"\"\"\n",
    "    dataframe_ship_group = dataframe.groupby('ship')\n",
    "    x_max_min_Series = dataframe_ship_group.apply(lambda t: t.x.max()-t.x.min())\n",
    "    y_max_min_Series = dataframe_ship_group.apply(lambda t: t.y.max()-t.y.min())\n",
    "    areas = x_max_min_Series.multiply(y_max_min_Series)  # width * height per ship\n",
    "    return areas\n",
    "\n",
    "train_areas = get_areas(train_all_data)\n",
    "test_areas = get_areas(test_all_data)\n",
    "\n",
    "zeros_train_areas = train_areas.loc[train_areas==0]\n",
    "zeros_test_areas = test_areas.loc[test_areas==0]\n",
    "\n",
    "# BUG FIX: `zeros_*_areas.index` holds ship ids, not row labels of the point\n",
    "# tables, so `.loc[zeros_train_areas.index, 'v'] = 0` would zero rows whose\n",
    "# positional index happens to equal a ship id. Select every row belonging to\n",
    "# a zero-area ship instead.\n",
    "train_all_data.loc[train_all_data['ship'].isin(zeros_train_areas.index), 'v'] = 0\n",
    "test_all_data.loc[test_all_data['ship'].isin(zeros_test_areas.index), 'v'] = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def distance(m, n, axis=None):\n",
    "    \"\"\"Euclidean distance between arrays m and n.\n",
    "\n",
    "    axis=None (default) sums squared differences over ALL elements, matching\n",
    "    the original behaviour for 1-D points; pass axis=0 to get one distance\n",
    "    per column, e.g. per-ship distances when m/n are stacked (2, n_ships)\n",
    "    coordinate arrays.\n",
    "    \"\"\"\n",
    "    return np.sqrt(np.sum((m - n) ** 2, axis=axis))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def extract_feature(df,train):\n",
    "    \"\"\"Aggregate point-level columns of `df` into ship-level features on `train`.\n",
    "\n",
    "    df    : point-level frame (one row per record, keyed by 'ship').\n",
    "    train : ship-level frame (one row per ship) the aggregates are merged into.\n",
    "    \"\"\"\n",
    "    # nunique, mean, std, var, min, quantile 0.25, median, quantile 0.75, max, mode\n",
    "    # NOTE(review): dict-based renaming in SeriesGroupBy.agg is deprecated and\n",
    "    # was removed in pandas >= 1.0 — confirm the pinned pandas still allows it.\n",
    "    for s in ['x', 'y', 'v', 'd', 'month', 'day', 'hour',\n",
    "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
    "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
    "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
    "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
    "       'biaozhun_second', 'biaozhun_weekday', 'ln_x', 'log2_x', 'log10_x',\n",
    "       'ln_y', 'log2_y', 'log10_y', 'ln_month', 'log2_month', 'log10_month',\n",
    "       'ln_day', 'log2_day', 'log10_day', 'shift_x', 'shift2_x', 'shift_y',\n",
    "       'shift2_y', 'shift_v', 'shift2_v', 'shift_d', 'shift2_d', 'shift_month',\n",
    "       'shift2_month', 'shift_day', 'shift2_day', 'shift_hour', 'shift2_hour',\n",
    "       'shift_minute', 'shift2_minute', 'shift_second', 'shift2_second',\n",
    "       'shift_weekday', 'shift2_weekday']:\n",
    "        t = df.groupby('ship')[s].agg({\n",
    "                                        'nunique_' + s: 'nunique', \n",
    "                                        'mean_' + s: 'mean', \n",
    "                                        'sum_' + s: 'sum',\n",
    "                                        'count_' + s: 'count',\n",
    "                                        'std_' + s: 'std', \n",
    "                                        'var_' + s: 'var',\n",
    "                                        'skew_' + s: 'skew',\n",
    "                                        'min_' + s: 'min',\n",
    "                                        'quantile0.25_' + s: lambda x: x.quantile(0.25),\n",
    "                                        'median_' + s: 'median',\n",
    "                                        'quantile0.75_' + s: lambda x: x.quantile(0.75),\n",
    "                                        'max_' + s: 'max',\n",
    "                                        'mode_' + s: lambda x: np.mean(pd.Series.mode(x))}).reset_index()\n",
    "        train = pd.merge(train,t, on='ship',how='left')\n",
    "    \n",
    "    # Interaction features of the x/y bounding box\n",
    "    train['x_max_x_min'] = train['max_x'] - train['min_x']\n",
    "    train['y_max_y_min'] = train['max_y'] - train['min_y']\n",
    "    train['y_max_x_min'] = train['max_y'] - train['min_x']\n",
    "    train['x_max_y_min'] = train['max_x'] - train['min_y']\n",
    "    train['rec_area'] = train['y_max_y_min'] * train['x_max_x_min']\n",
    "    train['slope'] = train['y_max_y_min'] / np.where(train['x_max_x_min']==0, 0.001, train['x_max_x_min'])\n",
    "    \n",
    "    # Activity radii: distance from the bounding-box corners to the median point.\n",
    "    # BUG FIX: the original summed squared differences over ALL ships, producing\n",
    "    # a single scalar broadcast to every row; compute the distance per ship.\n",
    "    train['short_r'] = np.sqrt((train['min_x'] - train['median_x']) ** 2 +\n",
    "                               (train['min_y'] - train['median_y']) ** 2)\n",
    "    train['long_r'] = np.sqrt((train['max_x'] - train['median_x']) ** 2 +\n",
    "                              (train['max_y'] - train['median_y']) ** 2)\n",
    "\n",
    "    # Share of points whose direction/speed reads 0 (treated as missing).\n",
    "    # BUG FIX: the original computed one global ratio over the whole frame\n",
    "    # (a constant, information-free feature) and used chained boolean indexing\n",
    "    # (`df[df.d==0][df.v==0]`); compute the ratios per ship and map them in.\n",
    "    grouped = df.groupby('ship')\n",
    "    train['direction_miss_rate'] = train['ship'].map(grouped['d'].apply(lambda c: (c == 0).mean()))\n",
    "    train['speed_miss_rate'] = train['ship'].map(grouped['v'].apply(lambda c: (c == 0).mean()))\n",
    "    train['direct&speed_miss_rate'] = train['ship'].map(\n",
    "        grouped.apply(lambda g: ((g['d'] == 0) & (g['v'] == 0)).mean()))\n",
    "    return train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def extract_dt(df):\n",
    "    \"\"\"Parse the 'time' column and add calendar components scaled to [0, 1].\"\"\"\n",
    "    df['time'] = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')\n",
    "    # Each component is divided by its natural range for a rough normalization.\n",
    "    for part, denom in [('month', 12), ('day', 31), ('hour', 24),\n",
    "                        ('minute', 60), ('second', 60), ('weekday', 7)]:\n",
    "        df[part] = getattr(df['time'].dt, part) / denom\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sub_k_by_key(serisdata,klist,key):\n",
    "    # Collapse one ship's track into scalar embeddings plus start/end deltas.\n",
    "    # serisdata: one ship's rows; klist: columns to diff; key: sort column.\n",
    "    # Reduce x, y, v, d to one representative value each: the transposed\n",
    "    # (4, n_points) matrix is embedded into a single component per channel.\n",
    "    pca_data  = serisdata[[\"x\",\"y\",\"v\",\"d\"]].T\n",
    "    pca_transform_data = PCA(n_components=1).fit_transform(pca_data)\n",
    "    # fit_transform returns shape (4, 1): row i embeds channel i.\n",
    "    serisdata[\"pca_x\"]=pca_transform_data[0][0]\n",
    "    serisdata[\"pca_y\"]=pca_transform_data[1][0]\n",
    "    serisdata[\"pca_v\"]=pca_transform_data[2][0]\n",
    "    serisdata[\"pca_d\"]=pca_transform_data[3][0]\n",
    "    \n",
    "    svd_transform_data = TruncatedSVD(n_components=1).fit_transform(pca_data)\n",
    "    serisdata[\"svd_x\"]=svd_transform_data[0][0]\n",
    "    serisdata[\"svd_y\"]=svd_transform_data[1][0]\n",
    "    serisdata[\"svd_v\"]=svd_transform_data[2][0]\n",
    "    serisdata[\"svd_d\"]=svd_transform_data[3][0]\n",
    "    \n",
    "    # NOTE(review): fitting TSNE once per ship group is extremely slow — the\n",
    "    # pipeline run below was interrupted inside this call; consider dropping it.\n",
    "    tsne_transform_data = manifold.TSNE(n_components=1, init='pca', random_state=0).fit_transform(pca_data)\n",
    "    serisdata[\"tsne_x\"]=tsne_transform_data[0][0]\n",
    "    serisdata[\"tsne_y\"]=tsne_transform_data[1][0]\n",
    "    serisdata[\"tsne_v\"]=tsne_transform_data[2][0]\n",
    "    serisdata[\"tsne_d\"]=tsne_transform_data[3][0]\n",
    "    \n",
    "    # Start/end values and their difference for each column in klist.\n",
    "    \"\"\"算开始 结束 和差值\"\"\"\n",
    "    # Sorted DESCENDING by `key`, so iloc[-1] is the earliest (start) record\n",
    "    # and iloc[0] the latest (end) record.\n",
    "    sort_series = serisdata.sort_values(by=key,ascending=False)\n",
    "    for k in klist:\n",
    "        serisdata[\"start_\"+k]=sort_series.iloc[-1][k]\n",
    "        serisdata[\"end_\"+k]=sort_series.iloc[0][k]\n",
    "        serisdata[\"start-end_\"+k]=sort_series.iloc[0][k] - sort_series.iloc[-1][k]\n",
    "    return serisdata"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse timestamps and add the normalized calendar features\n",
    "train_all_data = extract_dt(train_all_data)\n",
    "test_all_data = extract_dt(test_all_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Normalize the heading (0-360 degrees) into [0, 1]\n",
    "train_all_data[\"guiyi_d\"] = train_all_data['d'] / 360\n",
    "test_all_data[\"guiyi_d\"] = test_all_data['d'] / 360 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "重组前的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d'],\n",
      "      dtype='object')\n",
      "重组后的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_'],\n",
      "      dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# Feature crosses: products of every 2-, 3- and 4-way combination of x, y, v, d\n",
    "print(\"重组前的columns：{}\".format(train_all_data.columns))\n",
    "\n",
    "def combin_func(df):\n",
    "    \"\"\"Append product-interaction columns (e.g. 'x_y_', 'x_y_v_d_') to df.\"\"\"\n",
    "    base_cols = ['x', 'y', 'v', 'd']\n",
    "    for size in range(2, len(base_cols) + 1):\n",
    "        for combo in combinations(base_cols, size):\n",
    "            col_name = \"\".join(c + \"_\" for c in combo)\n",
    "            product = 1\n",
    "            for c in combo:\n",
    "                product = product * df[c]\n",
    "            df[col_name] = product\n",
    "    return df\n",
    "\n",
    "train_all_data = combin_func(train_all_data)\n",
    "test_all_data = combin_func(test_all_data)\n",
    "\n",
    "print(\"重组后的columns：{}\".format(train_all_data.columns))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "重组前的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_'],\n",
      "      dtype='object')\n",
      "重组后的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
      "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
      "       'biaozhun_second', 'biaozhun_weekday'],\n",
      "      dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# Standardize the base columns (zero mean, unit variance)\n",
    "print(\"重组前的columns：{}\".format(train_all_data.columns))\n",
    "\n",
    "def preprocess_func(df):\n",
    "    \"\"\"Add 'biaozhun_<col>' (standardized) versions of the base columns.\"\"\"\n",
    "    cols = ['x', 'y', 'v', 'd','month', 'day', 'hour','minute', 'second', 'weekday']\n",
    "    for key in cols:\n",
    "        # BUG FIX: wrapping the scaled ndarray in pd.Series gave it a fresh\n",
    "        # RangeIndex; assigning that to df aligns on index and silently yields\n",
    "        # NaN whenever df's index is not the default. Assign the raw array.\n",
    "        df[\"biaozhun_\" + key] = preprocessing.scale(df[key])\n",
    "    return df\n",
    "\n",
    "train_all_data = preprocess_func(train_all_data)\n",
    "test_all_data = preprocess_func(test_all_data)\n",
    "\n",
    "print(\"重组后的columns：{}\".format(train_all_data.columns))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "重组前的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
      "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
      "       'biaozhun_second', 'biaozhun_weekday'],\n",
      "      dtype='object')\n",
      "重组后的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
      "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
      "       'biaozhun_second', 'biaozhun_weekday', 'ln_x', 'log2_x', 'log10_x',\n",
      "       'ln_y', 'log2_y', 'log10_y', 'ln_month', 'log2_month', 'log10_month',\n",
      "       'ln_day', 'log2_day', 'log10_day'],\n",
      "      dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# Log-transformed copies of (assumed strictly positive) columns\n",
    "print(\"重组前的columns：{}\".format(train_all_data.columns))\n",
    "\n",
    "def ln_func(df):\n",
    "    \"\"\"Add natural-, base-2- and base-10-log versions of selected columns.\"\"\"\n",
    "    cols = ['x', 'y', 'month', 'day']\n",
    "    for key in cols:\n",
    "        # PERF: vectorized numpy logs instead of a Python-level .apply per row.\n",
    "        # NOTE(review): np.log of 0/negative yields -inf/nan instead of raising\n",
    "        # like math.log — assumes these columns are strictly positive; confirm.\n",
    "        df[\"ln_\" + key] = np.log(df[key])\n",
    "        df[\"log2_\" + key] = np.log2(df[key])\n",
    "        df[\"log10_\" + key] = np.log10(df[key])\n",
    "    return df\n",
    "\n",
    "train_all_data = ln_func(train_all_data)\n",
    "test_all_data = ln_func(test_all_data)\n",
    "\n",
    "print(\"重组后的columns：{}\".format(train_all_data.columns))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "重组前的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
      "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
      "       'biaozhun_second', 'biaozhun_weekday', 'ln_x', 'log2_x', 'log10_x',\n",
      "       'ln_y', 'log2_y', 'log10_y', 'ln_month', 'log2_month', 'log10_month',\n",
      "       'ln_day', 'log2_day', 'log10_day'],\n",
      "      dtype='object')\n",
      "重组后的columns：Index(['ship', 'x', 'y', 'v', 'd', 'time', 'type', 'month', 'day', 'hour',\n",
      "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
      "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
      "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
      "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
      "       'biaozhun_second', 'biaozhun_weekday', 'ln_x', 'log2_x', 'log10_x',\n",
      "       'ln_y', 'log2_y', 'log10_y', 'ln_month', 'log2_month', 'log10_month',\n",
      "       'ln_day', 'log2_day', 'log10_day', 'shift_x', 'shift2_x', 'shift_y',\n",
      "       'shift2_y', 'shift_v', 'shift2_v', 'shift_d', 'shift2_d', 'shift_month',\n",
      "       'shift2_month', 'shift_day', 'shift2_day', 'shift_hour', 'shift2_hour',\n",
      "       'shift_minute', 'shift2_minute', 'shift_second', 'shift2_second',\n",
      "       'shift_weekday', 'shift2_weekday'],\n",
      "      dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# Lag ('shift_') and lead ('shift2_') features per ship\n",
    "print(\"重组前的columns：{}\".format(train_all_data.columns))\n",
    "\n",
    "def shift_func(df):\n",
    "    \"\"\"Add previous-point ('shift_') and next-point ('shift2_') values per ship.\"\"\"\n",
    "    dfgroupby = df.groupby('ship')\n",
    "    cols = ['x', 'y', 'v', 'd', 'month', 'day', 'hour','minute', 'second', 'weekday']\n",
    "    for key in cols:\n",
    "        # BUG FIX: the original chained `df[col].fillna(0, inplace=True)`, which\n",
    "        # may operate on a temporary copy (SettingWithCopy) and silently leave\n",
    "        # NaN in place; fill before assignment instead.\n",
    "        df['shift_' + key] = dfgroupby[key].shift(1).fillna(0)\n",
    "        df['shift2_' + key] = dfgroupby[key].shift(-1).fillna(0)\n",
    "    return df\n",
    "\n",
    "train_all_data = shift_func(train_all_data)\n",
    "test_all_data = shift_func(test_all_data)\n",
    "\n",
    "print(\"重组后的columns：{}\".format(train_all_data.columns))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-15-495d6fdc0306>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     14\u001b[0m        'shift_weekday', 'shift2_weekday']\n\u001b[1;32m     15\u001b[0m \u001b[0mtrain_all_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrain_all_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgroupby\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'ship'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0msub_k_by_key\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msubkbykeylist\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"time\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtest_all_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtest_all_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgroupby\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'ship'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0msub_k_by_key\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msubkbykeylist\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"time\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0;31m# 做统计特征 将数据压缩成7000和2000\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/pandas/core/groupby/groupby.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, func, *args, **kwargs)\u001b[0m\n\u001b[1;32m    723\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0moption_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"mode.chained_assignment\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    724\u001b[0m             \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 725\u001b[0;31m                 \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_python_apply_general\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    726\u001b[0m             \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    727\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/pandas/core/groupby/groupby.py\u001b[0m in \u001b[0;36m_python_apply_general\u001b[0;34m(self, f)\u001b[0m\n\u001b[1;32m    740\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    741\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_python_apply_general\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 742\u001b[0;31m         \u001b[0mkeys\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmutated\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrouper\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_selected_obj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maxis\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    743\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    744\u001b[0m         return self._wrap_applied_output(\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/pandas/core/groupby/ops.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, f, data, axis)\u001b[0m\n\u001b[1;32m    235\u001b[0m             \u001b[0;31m# group might be modified\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    236\u001b[0m             \u001b[0mgroup_axes\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_get_axes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgroup\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 237\u001b[0;31m             \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgroup\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    238\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0m_is_indexed_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mres\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgroup_axes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    239\u001b[0m                 \u001b[0mmutated\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-15-495d6fdc0306>\u001b[0m in \u001b[0;36m<lambda>\u001b[0;34m(t)\u001b[0m\n\u001b[1;32m     14\u001b[0m        'shift_weekday', 'shift2_weekday']\n\u001b[1;32m     15\u001b[0m \u001b[0mtrain_all_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrain_all_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgroupby\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'ship'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0msub_k_by_key\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msubkbykeylist\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"time\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtest_all_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtest_all_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgroupby\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'ship'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0msub_k_by_key\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msubkbykeylist\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"time\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0;31m# 做统计特征 将数据压缩成7000和2000\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-8-cdc2631b7c0f>\u001b[0m in \u001b[0;36msub_k_by_key\u001b[0;34m(serisdata, klist, key)\u001b[0m\n\u001b[1;32m     14\u001b[0m     \u001b[0mserisdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"svd_d\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msvd_transform_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m     \u001b[0mtsne_transform_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmanifold\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTSNE\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn_components\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'pca'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrandom_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit_transform\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpca_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m     \u001b[0mserisdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"tsne_x\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtsne_transform_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m     \u001b[0mserisdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"tsne_y\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtsne_transform_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py\u001b[0m in \u001b[0;36mfit_transform\u001b[0;34m(self, X, y)\u001b[0m\n\u001b[1;32m    884\u001b[0m             \u001b[0mEmbedding\u001b[0m \u001b[0mof\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mtraining\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mlow\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mdimensional\u001b[0m \u001b[0mspace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    885\u001b[0m         \"\"\"\n\u001b[0;32m--> 886\u001b[0;31m         \u001b[0membedding\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    887\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membedding_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0membedding\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    888\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membedding_\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py\u001b[0m in \u001b[0;36m_fit\u001b[0;34m(self, X, skip_num_points)\u001b[0m\n\u001b[1;32m    796\u001b[0m                           \u001b[0mX_embedded\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mX_embedded\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    797\u001b[0m                           \u001b[0mneighbors\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mneighbors_nn\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 798\u001b[0;31m                           skip_num_points=skip_num_points)\n\u001b[0m\u001b[1;32m    799\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    800\u001b[0m     def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded,\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py\u001b[0m in \u001b[0;36m_tsne\u001b[0;34m(self, P, degrees_of_freedom, n_samples, X_embedded, neighbors, skip_num_points)\u001b[0m\n\u001b[1;32m    835\u001b[0m         \u001b[0mP\u001b[0m \u001b[0;34m*=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mearly_exaggeration\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    836\u001b[0m         params, kl_divergence, it = _gradient_descent(obj_func, params,\n\u001b[0;32m--> 837\u001b[0;31m                                                       **opt_args)\n\u001b[0m\u001b[1;32m    838\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mverbose\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    839\u001b[0m             print(\"[t-SNE] KL divergence after %d iterations with early \"\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py\u001b[0m in \u001b[0;36m_gradient_descent\u001b[0;34m(objective, p0, it, n_iter, n_iter_check, n_iter_without_progress, momentum, learning_rate, min_gain, min_grad_norm, verbose, args, kwargs)\u001b[0m\n\u001b[1;32m    356\u001b[0m         \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'compute_error'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcheck_convergence\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mn_iter\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 358\u001b[0;31m         \u001b[0merror\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mobjective\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    359\u001b[0m         \u001b[0mgrad_norm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlinalg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    360\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/software/env/AI/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py\u001b[0m in \u001b[0;36m_kl_divergence_bh\u001b[0;34m(params, P, degrees_of_freedom, n_samples, n_components, angle, skip_num_points, verbose, compute_error, num_threads)\u001b[0m\n\u001b[1;32m    260\u001b[0m                                       \u001b[0mdof\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdegrees_of_freedom\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    261\u001b[0m                                       \u001b[0mcompute_error\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcompute_error\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 262\u001b[0;31m                                       num_threads=num_threads)\n\u001b[0m\u001b[1;32m    263\u001b[0m     \u001b[0mc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m2.0\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mdegrees_of_freedom\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1.0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mdegrees_of_freedom\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    264\u001b[0m     \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mravel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# BUG FIX: time.clock() was deprecated since Python 3.3 and removed in 3.8;\n",
    "# use the monotonic perf_counter for wall-clock timing instead.\n",
    "start = time.perf_counter()\n",
    "\n",
    "subkbykeylist=['x', 'y', 'v', 'd', 'month', 'day', 'hour',\n",
    "       'minute', 'second', 'weekday', 'guiyi_d', 'x_y_', 'x_v_', 'x_d_',\n",
    "       'y_v_', 'y_d_', 'v_d_', 'x_y_v_', 'x_y_d_', 'x_v_d_', 'y_v_d_',\n",
    "       'x_y_v_d_', 'biaozhun_x', 'biaozhun_y', 'biaozhun_v', 'biaozhun_d',\n",
    "       'biaozhun_month', 'biaozhun_day', 'biaozhun_hour', 'biaozhun_minute',\n",
    "       'biaozhun_second', 'biaozhun_weekday', 'ln_x', 'log2_x', 'log10_x',\n",
    "       'ln_y', 'log2_y', 'log10_y', 'ln_month', 'log2_month', 'log10_month',\n",
    "       'ln_day', 'log2_day', 'log10_day', 'shift_x', 'shift2_x', 'shift_y',\n",
    "       'shift2_y', 'shift_v', 'shift2_v', 'shift_d', 'shift2_d', 'shift_month',\n",
    "       'shift2_month', 'shift_day', 'shift2_day', 'shift_hour', 'shift2_hour',\n",
    "       'shift_minute', 'shift2_minute', 'shift_second', 'shift2_second',\n",
    "       'shift_weekday', 'shift2_weekday']\n",
    "# Per-ship embeddings + start/end deltas (slow: see sub_k_by_key's TSNE note)\n",
    "train_all_data=train_all_data.groupby('ship').apply(lambda t: sub_k_by_key(t,subkbykeylist,\"time\"))\n",
    "test_all_data=test_all_data.groupby('ship').apply(lambda t: sub_k_by_key(t,subkbykeylist,\"time\"))\n",
    "\n",
    "# Collapse to one row per ship and merge in the aggregate statistics\n",
    "train_min_rows = train_all_data.drop_duplicates('ship')\n",
    "test_min_rows = test_all_data.drop_duplicates('ship')\n",
    "\n",
    "train_all_data = extract_feature(train_all_data,train_min_rows)\n",
    "test_all_data = extract_feature(test_all_data,test_min_rows)\n",
    "end = time.perf_counter()\n",
    "print(str(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop columns not used for training. 'ship' is kept in the test frame so it\n",
    "# can be sorted below and presumably serve as the submission id — TODO confirm.\n",
    "train_all_data = train_all_data.drop(['time',\"ship\"], axis=1)\n",
    "test_all_data = test_all_data.drop(['time'], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_all_data = test_all_data.sort_values(by=[\"ship\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all_data.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all_data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all_data.to_hdf('data/train_transform.h5', key='df', mode='w')\n",
    "test_all_data.to_hdf('data/test_transform.h5', key='df', mode='w')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
