{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据预处理操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.colors as colors\n",
    "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
    "from sklearn.model_selection import train_test_split\n",
    "from pandas import set_option\n",
    "set_option(\"display.max_rows\", 15)\n",
    "set_option('display.width', 200)\n",
    "import seaborn as sns\n",
    "import math\n",
    "import tensorflow as tf\n",
    "import senutil as sen\n",
    "import time"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "numpy+mkl 版本为 1.17.2  \n",
    "tensorboard，tensorflow 版本为1.13.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): `from pylab import *` pollutes the namespace and nothing in\n",
    "# the visible cells relies on pylab-only names — TODO confirm later cells,\n",
    "# then replace the star import with explicit imports.\n",
    "from pylab import *\n",
    "# Use the SimHei font so matplotlib can render Chinese characters in labels.\n",
    "mpl.rcParams['font.sans-serif'] = ['SimHei']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restrict TensorFlow to GPU 0; comment this line out for CPU-only training.\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='0'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Target curves: PERM (permeability), POR (porosity), SW (water saturation).\n",
    "element_names = [\"PERM\", \"POR\", \"SW\", \"孔隙度\",\"渗透率\",\"饱和度\",\"SH\"]\n",
    "# Reference (measured) column names, index-aligned with `element` below:\n",
    "# 孔隙度=POR (porosity), 饱和度=SW (saturation), 渗透率=PERM (permeability).\n",
    "Reference = [\"POR\",\"SW\",\"PERM\"]\n",
    "element = [\"孔隙度\",\"饱和度\",\"渗透率\"]\n",
    "# element = [\"POR\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义要输入的维度AC、CNL、DEN、GR、RD、RS\n",
    "input_vectors = [\"AC\",\"CNL\",\"DEN\",\"GR\",\"RLLD\",\"RLLS\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 定义要训练的物性参数模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "读取物性参数训练数据，包括POR、SW、PERM等元素曲线  \n",
    "element_name = \"孔隙度\"|\"饱和度\"|\"渗透率\"   \n",
    "读取真实测量数据  \n",
    "Reference_name = \"POR\"|\"SW\"|\"PERM\" (参考的计算物性参数)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "element_name = \"渗透率\"\n",
    "Reference_name = \"PERM\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 选择要使用的模型类型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "model_type：'GRU','NAS','SRU','LSTM','LSTM_Block','IndyGRU','LSTM-GRU','GRU_Block','G-LSTM','IndyLSTM','UGRNNCell'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_type = 'GRU'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Folder where trained models are stored, e.g. model/multi_gru_train/.\n",
    "model_save_path = os.path.join(\"model/\", 'multi_' + model_type.lower() + \"_train/\")\n",
    "# makedirs(..., exist_ok=True) replaces the original exists()/mkdir branch\n",
    "# (both arms of which assigned the same value): it is race-free and also\n",
    "# creates any missing parent directories.\n",
    "os.makedirs(model_save_path, exist_ok=True)\n",
    "model_path = model_save_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "TrainDataPath = 'data/train/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# model_path = os.path.join(\"model-gui/\", 'multi_gru_train')\n",
    "log_path = \"logs/\"\n",
    "# Training CSV options (well data 1, 2019-06-03): (1) ..._孔隙度_训练.csv (porosity);\n",
    "# (2) ..._饱和度_训练.csv (saturation); (3) ..._渗透率_训练.csv (permeability).\n",
    "filename_AB = '井数据1_20190603_渗透率_训练_3.csv'\n",
    "# First 20 characters of the file name serve as the well/run identifier.\n",
    "well_name = filename_AB[0:20]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "TrainDataPath = os.path.join(TrainDataPath,filename_AB)\n",
    "TrainDataPath"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "well_name"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 读入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 调用pandas的read_csv()方法时，默认使用C engine作为parser engine，而当文件名中含有中文的时候，用C engine在部分情况下就会出错。所以在调用read_csv()方法时指定engine为Python就可以解决问题了。\n",
    "AB_use = pd.read_csv(TrainDataPath,engine='python',encoding='GBK')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_use "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_use  = AB_use.dropna()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 去除横纵坐标为全0的值\n",
    "# AB_1 =AB_Table[~AB_Table[element].isin([0])]\n",
    "# AB_use =AB_1[~AB_1[Reference].isin([0])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "Y_ele = AB_use.loc[:, element]\n",
    "# GT = AB_use.loc[:, Reference]\n",
    "Reference_use = AB_use.loc[:, Reference_name]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "Y_ele,Reference_use"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 交会图分析待训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Permeability spans several orders of magnitude, so analyse it on a log10\n",
    "# scale; the other reference curves are used as-is.\n",
    "if Reference_name == \"PERM\":\n",
    "    GT = np.log10(Reference_use)\n",
    "#     GT = Reference_use\n",
    "else:\n",
    "    GT = Reference_use"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(GT)\n",
    "# Show the value range of the (possibly log-transformed) reference curve.\n",
    "# BUG FIX: the original `GT.max,GT.min` (no parentheses) displayed the bound\n",
    "# methods rather than the values; call them explicitly.\n",
    "GT.max(), GT.min()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "flag = element.index(element_name)\n",
    "flag,element_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cross-plot of the selected computed curve against its measured reference.\n",
    "plt.figure(figsize=(24, 24))\n",
    "plt.title(\"relationship\")\n",
    "\n",
    "if flag == 0:\n",
    "    plt.subplot(331)\n",
    "    plt.scatter(GT,Y_ele.iloc[:,0])\n",
    "    plt.xlabel(Reference[0])\n",
    "    plt.ylabel(element[0])\n",
    "    # # plt.ylim(0,50)\n",
    "    # # plt.xlim(0,50)\n",
    "    # plt.legend(loc='best')\n",
    "    plt.grid(True)  # show grid lines\n",
    "    # plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + 'ValAll.jpg', dpi=220,  bbox_inches='tight')\n",
    "    plt.show()\n",
    "elif flag == 1:\n",
    "    plt.subplot(332)\n",
    "    plt.scatter(GT,Y_ele.iloc[:,1])\n",
    "    plt.xlabel(Reference[1])\n",
    "    plt.ylabel(element[1]) \n",
    "    plt.grid(True)  # show grid lines\n",
    "    plt.show()\n",
    "elif flag == 2:\n",
    "    plt.subplot(333)\n",
    "    # Keep minus signs from rendering as garbage on the axes with SimHei.\n",
    "    plt.rcParams['axes.unicode_minus'] = False \n",
    "    plt.scatter(GT,np.log10(Y_ele.iloc[:,2]))\n",
    "    plt.xlabel(Reference[2])\n",
    "    plt.ylabel(element[2])\n",
    "    plt.grid(True)  # show grid lines\n",
    "    plt.show()\n",
    "else:\n",
    "    print(\"You Input a wrong Target Parameter!\")\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 去除横纵坐标为全0的值\n",
    "# GT =GT[~GT.isin([-2])]\n",
    "# AB_use =AB_1[~AB_1[Reference].isin([0])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 抽取待使用的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "inputX = AB_use.loc[:,'AC':'RLLS']\n",
    "inputX "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "inputY = AB_use.loc[:, element]  \n",
    "inputY"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "inputY_GT = AB_use.loc[:, Reference]\n",
    "inputY_GT"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 渗透率取对数log10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Permeability is heavy-tailed, so train on log10 of both the computed curve\n",
    "# (渗透率) and the measured reference (PERM).\n",
    "# NOTE(review): inputY/inputY_GT are .loc selections of AB_use; assigning into\n",
    "# them may raise SettingWithCopyWarning — TODO confirm AB_use's raw values are\n",
    "# not needed afterwards.\n",
    "if element_name == \"渗透率\":\n",
    "    inputY.loc[:,\"渗透率\"] = np.log10(inputY.loc[:,\"渗透率\"])\n",
    "    inputY_GT.loc[:,\"PERM\"] = np.log10(inputY_GT.loc[:,\"PERM\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 电阻率取对数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resistivity curves span orders of magnitude; take log10 so they match the\n",
    "# RD/RS normalization ranges defined below.\n",
    "inputX.loc[:,\"RLLD\":\"RLLS\"] = np.log10(inputX.loc[:,\"RLLD\":\"RLLS\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "inputX"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 对于数据进行归一化操作"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 定义输入数据所在范围"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-curve [min, max] ranges used for 0-1 normalization of the input logs.\n",
    "# BUG FIX: the original test `element_name == \"孔隙度\" or \"饱和度\"` was always\n",
    "# true (a non-empty string is truthy), so the else branch was unreachable;\n",
    "# use a membership test so permeability gets its own, tighter ranges.\n",
    "if element_name in (\"孔隙度\", \"饱和度\"):  # porosity or saturation\n",
    "    AC = [140,350]\n",
    "    CNL = [-0.8,80]\n",
    "    DEN = [1,3]\n",
    "    GR = [0,350]\n",
    "    RD = [0,8]\n",
    "    RS = [0,7]\n",
    "else:  # permeability\n",
    "    AC = [140,300]\n",
    "    CNL = [-0.8,70]\n",
    "    DEN = [1,3]\n",
    "    GR = [0,350]\n",
    "    RD = [0,5]\n",
    "    RS = [0,5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Normalization ranges for the computed target curves.\n",
    "POR = [0,17]\n",
    "# PERM range is in log10 units (raw permeability would be roughly [0,100]).\n",
    "PERM = [-3,2]\n",
    "SW = [4,100]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "POR_GT = [0,17]\n",
    "PERM_GT = [-3,2]\n",
    "SW_GT = [4,100]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "u_log_name = [AC,CNL,DEN,GR,RD,RS]\n",
    "e_log_name = [POR,PERM,SW] \n",
    "e_GT_log_name = [POR_GT,PERM_GT,SW_GT]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "u_log_name,e_log_name,e_GT_log_name"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 按照输入范围执行归一化操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def ZeroOneScaler(data,log_name):\n",
    "    '''Column-wise 0-1 normalization against fixed per-column ranges.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    data : pandas DataFrame whose i-th column is scaled with log_name[i].\n",
    "    log_name : list of [min, max] pairs, one per column of `data`.\n",
    "\n",
    "    Returns a new DataFrame (the input is not modified); values outside the\n",
    "    given range map outside [0, 1].\n",
    "    '''\n",
    "    result = data.copy()\n",
    "    for i in range(len(log_name)):\n",
    "        numerator = data.iloc[:,i]-log_name[i][0]\n",
    "        denominator = log_name[i][1]-log_name[i][0]\n",
    "#     numerator = data - np.min(data, 0)\n",
    "#     denominator = np.max(data, 0) - np.min(data, 0)\n",
    "    # the small epsilon prevents division by zero for degenerate ranges\n",
    "        result.iloc[:,i]= numerator / (denominator + 1e-7)\n",
    "    return  result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_G = ZeroOneScaler(inputX,u_log_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_G"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): inputY (computed curves) is scaled with e_GT_log_name while\n",
    "# inputY_GT below uses e_log_name — the lists look swapped, but their values\n",
    "# are identical so the result is unaffected. TODO confirm intent.\n",
    "AB_Y_G = ZeroOneScaler(inputY,e_GT_log_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_Y_G"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): the measured curves use e_log_name here while the computed\n",
    "# curves above use e_GT_log_name — the lists appear swapped, but their values\n",
    "# are identical so the output is unaffected. TODO confirm intent.\n",
    "AB_e_GT = ZeroOneScaler(inputY_GT,e_log_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AB_e_GT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "max(AB_Y_G.渗透率),max(AB_e_GT.PERM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "max(inputY.渗透率),max(inputY_GT.PERM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tf.data.experimental.make_csv_dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 读取相应数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cursor = AB_G.shape[1] - 1\n",
    "# print(cursor)\n",
    "\n",
    "# Select the normalized input curves and the single target/reference columns.\n",
    "x_gy = AB_G.loc[:, 'AC':'RLLS']\n",
    "y_gy = AB_Y_G.loc[:, [element_name]]\n",
    "y_gt_gy = AB_e_GT.loc[:, [Reference_name]]\n",
    "# The list selector [element_name] keeps a DataFrame rather than a Series.\n",
    "# y = AB.loc[:, [element_name]]\n",
    "np.set_printoptions(suppress=True, threshold=5000)\n",
    "print(y_gy)\n",
    "print(y_gt_gy)\n",
    "\n",
    "print(\"x.shape:\", x_gy.shape)\n",
    "print(\"y.shape:\", y_gy.shape)\n",
    "print(\"gt.shape:\", y_gt_gy.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型设定"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_graph = tf.Graph()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "element_name"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 定义网络模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ==================== network structure hyper-parameters ====================\n",
    "# Length of the input window (depth steps) fed to the RNN.\n",
    "seq_length = 10\n",
    "# Number of input curves per step (AC, CNL, DEN, GR, RLLD, RLLS).\n",
    "data_dim = 6\n",
    "hidden_dim = 72\n",
    "# Output dimensionality: one target curve.\n",
    "# output_dim = 1\n",
    "output_dim = 1\n",
    "learning_rate = 0.0005\n",
    "# keep_prob = 1    # set at session time via the placeholder\n",
    "n_layers = 5  # number of stacked RNN layers\n",
    "# batch_size = 100\n",
    "iterations = 3000\n",
    "\n",
    "\n",
    "# model_name = well_name + \"_Method-1-Multi\" + str(n_layers) + element_name + \"_lr_\" + str(learning_rate) + \"_iterations_\" + str(iterations)\n",
    "# model_name = well_name + \"_Method-1-Multi_\" + str(n_layers) + \"_layers_\"+ element_name + \"_lr_\" + str(\n",
    "#     learning_rate) + \"_iterations_\" + str(iterations)\n",
    "model_name = well_name + \"_\"+  model_type.lower() +\"_\" + str(n_layers) + \"_layers_\" + element_name + \"_lr_\" + str(\n",
    "        learning_rate) + \"hidden_dim\" +str(hidden_dim)+  \"_iterations_\" + str(iterations)\n",
    "model_file = model_path + model_name\n",
    "model_testing_img_file_saving_path = 'model_testing_images/'\n",
    "model_testing_image_name = model_name + \"testing\"\n",
    "\n",
    "tf.set_random_seed(777)  # for reproducibility"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型单元设计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All cell factories share the same shape: build a tf.contrib.rnn cell of\n",
    "# size hidden_dim and wrap it with dropout (the keep_prob placeholder is\n",
    "# defined in the training graph). A shared helper replaces nine near-identical\n",
    "# bodies and removes the `lstm_blcok_cell` typo'd local in the original.\n",
    "def _dropout_cell(cell, scope_name):\n",
    "    # Wrap `cell` with output dropout under a named scope for TensorBoard.\n",
    "    with tf.name_scope(scope_name):\n",
    "        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n",
    "\n",
    "def gru_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.GRUCell(hidden_dim), 'gru_dropout')\n",
    "\n",
    "def gru_block_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.GRUBlockCell(hidden_dim), 'gru_block_dropout')\n",
    "\n",
    "def lstm_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.BasicLSTMCell(hidden_dim), 'lstm_dropout')\n",
    "\n",
    "def lstm_block_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.LSTMBlockCell(hidden_dim), 'lstm_block_dropout')\n",
    "\n",
    "def glstm_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.GLSTMCell(hidden_dim), 'glstm_dropout')\n",
    "\n",
    "def indy_lstm_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.IndyLSTMCell(hidden_dim), 'indy_lstm_dropout')\n",
    "\n",
    "def indy_gru_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.IndyGRUCell(hidden_dim), 'indy_gru_dropout')\n",
    "\n",
    "def ugrnn_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.UGRNNCell(hidden_dim), 'ugrnn_dropout')\n",
    "\n",
    "def sru_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.SRUCell(hidden_dim), 'sru_dropout')\n",
    "\n",
    "def nas_cell():\n",
    "    return _dropout_cell(tf.contrib.rnn.NASCell(hidden_dim), 'nas_dropout')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_type_select(model_type):\n",
    "    \"\"\"Return a freshly constructed RNN cell for the given model-type name.\n",
    "\n",
    "    Unknown names fall back to the NAS cell, matching the original if/elif\n",
    "    chain's final else branch.\n",
    "    \"\"\"\n",
    "    factories = {\n",
    "        'GRU': gru_cell,\n",
    "        'GRU_Block': gru_block_cell,\n",
    "        'LSTM': lstm_cell,\n",
    "        'IndyGRU': indy_gru_cell,\n",
    "        'LSTM_Block': lstm_block_cell,\n",
    "        'G-LSTM': glstm_cell,\n",
    "        'IndyLSTM': indy_lstm_cell,\n",
    "        'UGRNNCell': ugrnn_cell,\n",
    "        'SRU': sru_cell,\n",
    "        'NAS': nas_cell,\n",
    "    }\n",
    "    return factories.get(model_type, nas_cell)()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 设定模型计算图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = seq_length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with train_graph.as_default():\n",
    "     # input place holders\n",
    "    with tf.name_scope('inputs') as scope:\n",
    "        X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='x_input')  # input window\n",
    "        Y = tf.placeholder(tf.float32, [None, output_dim], name='y_input')\n",
    "        keep_prob = tf.placeholder(tf.float32, name='keep_prob_input')  # fraction of units kept by dropout\n",
    "        # batch_size = tf.placeholder(tf.int32, [], name='batch_size_input')  # batch size\n",
    "\n",
    "    # weights and biases\n",
    "    with tf.name_scope('weights'):\n",
    "        Weights = tf.Variable(tf.truncated_normal([hidden_dim, output_dim], stddev=0.1), dtype=tf.float32, name='W')\n",
    "        # tf.summary.histogram('output_layer_weights', Weights)\n",
    "    with tf.name_scope('biases'):\n",
    "        biases = tf.Variable(tf.random_normal([output_dim]), name='b')\n",
    "        # tf.summary.histogram('output_layer_biases', biases)\n",
    "\n",
    "\n",
    "    # Stack n_layers dropout-wrapped RNN cells of the selected model type.\n",
    "\n",
    "    enc_cells = []\n",
    "    for i in range(0, n_layers):\n",
    "        enc_cells.append(model_type_select(model_type))\n",
    "    with tf.name_scope(model_type.lower() + '_cells_layers'):\n",
    "        mlstm_cell = tf.contrib.rnn.MultiRNNCell(enc_cells, state_is_tuple=True)\n",
    "\n",
    "    # zero-initialized state (dynamic_rnn creates it implicitly below)\n",
    "    # _init_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)\n",
    "    # run the network with dynamic_rnn\n",
    "\n",
    "    with tf.name_scope('output_layer'):\n",
    "        outputs, _states = tf.nn.dynamic_rnn(mlstm_cell, X, dtype=tf.float32, time_major=False)\n",
    "        Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=tf.nn.relu)\n",
    "        # activation_fn : tf.nn.relu\n",
    "        # tf.summary.histogram('outputs', Y_pred)\n",
    "\n",
    "    # We use the last cell's output\n",
    "\n",
    "    # loss: sum of squared errors over the batch\n",
    "    with tf.name_scope('loss') as scope:\n",
    "        loss = tf.reduce_sum(tf.square(Y_pred - Y))  # sum of the squares\n",
    "        # From TF graph, decide which tensors you want to log\n",
    "        tf.summary.scalar( Reference_name + '_' +  model_type.lower()+ '_' + str(n_layers) + \"lr:\" + str(learning_rate) + \"hidden\" +str(hidden_dim)+  \",iter:\" + str(\n",
    "                iterations) + '_loss', loss)\n",
    "\n",
    "    # optimizer\n",
    "\n",
    "    with tf.name_scope('train') as scope:\n",
    "        optimizer = tf.train.AdamOptimizer(learning_rate)\n",
    "        train = optimizer.minimize(loss)\n",
    "\n",
    "    # NOTE(review): Weights/biases above are never used — Y_pred comes from\n",
    "    # fully_connected, which creates its own variables.\n",
    "    Predictions = tf.placeholder(tf.float32, [None, output_dim])\n",
    "    targets = tf.placeholder(tf.float32, [None, output_dim])\n",
    "\n",
    "    # rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - Predictions)))\n",
    "    # NOTE(review): despite the name, this is MSE, not RMSE (sqrt is commented out).\n",
    "    rmse = tf.losses.mean_squared_error(targets, Predictions)\n",
    "    \n",
    "    # saver used to checkpoint the trained model\n",
    "    saver = tf.train.Saver()\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 设定训练验证数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据序列化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ##################################### data #################################\n",
    "AB_X = np.array(x_gy)\n",
    "AB_Y = np.array(y_gy)\n",
    "AB_Y_GT = np.array(y_gt_gy)\n",
    "\n",
    "# Build sliding windows: each sample is seq_length consecutive input rows,\n",
    "# labelled with the target value immediately after the window.\n",
    "dataX = []\n",
    "dataY = []\n",
    "dataY_GT = []\n",
    "\n",
    "for i in range(0, len(inputY) - seq_length):\n",
    "    _x = AB_X[i:i + seq_length]\n",
    "    _y = AB_Y[i + seq_length]  \n",
    "    _y_gt = AB_Y_GT[i + seq_length]  # label = value just past the window\n",
    "    # print(_x, \"->\", _y)\n",
    "    dataX.append(_x)\n",
    "    dataY.append(_y)\n",
    "    dataY_GT.append(_y_gt)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练集验证集划分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split into train/validation sets; the three calls share random_state=0 so\n",
    "# the X / Y / GT splits stay row-aligned.\n",
    "trainX, testX = train_test_split(dataX, test_size=0.2, random_state=0)\n",
    "trainY, testY = train_test_split(dataY, test_size=0.2, random_state=0)\n",
    "trainY_GT, testY_GT = train_test_split(dataY_GT, test_size=0.2, random_state=0)\n",
    "\n",
    "# Convert the lists to arrays.\n",
    "# NOTE(review): trainX/trainY are rebuilt from the FULL dataX/dataY below, so\n",
    "# the model trains on all samples and the \"validation\" split overlaps the\n",
    "# training data — val_loss is optimistic. TODO confirm this is intentional.\n",
    "# trainX, testX = np.array(trainX), np.array(testX)\n",
    "trainX, testX = np.array(dataX), np.array(testX)\n",
    "\n",
    "# trainY, testY = np.array(trainY), np.array(testY)\n",
    "trainY, testY = np.array(dataY), np.array(testY)\n",
    "\n",
    "trainY_GT, testY_GT = np.array(dataY_GT), np.array(testY_GT)\n",
    "\n",
    "print(\"trainX.shape:\", trainX.shape)\n",
    "print(\"testX.shape:\", testX.shape)\n",
    "print(\"trainY.shape:\", trainY.shape)\n",
    "print(\"testY.shape:\", testY.shape)\n",
    "print(\"trainY_GT.shape:\", trainY_GT.shape)\n",
    "print(\"testY_GT.shape:\", testY_GT.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "trainX"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_graph.version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch_size=seq_length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置迭代次数\n",
    "# epoch_num = 2000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算每一轮epoch中含有的batch个数\n",
    "# batch_total = int(len(trainX)/batch_size)+1\n",
    "# batch_total\n",
    "element_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# batch_trainX,batch_trainY = sen.get_net_batch_data(trainX,trainY,batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch_testX,batch_testY = sen.get_net_batch_data(testX,testY,batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batch_trainX.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# merged_summaries = tf.summary.merge_all()\n",
    "# init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n",
    "\n",
    "with tf.Session(graph=train_graph) as sess:\n",
    "# with tf.Session() as sess:\n",
    "    # Merge all summaries\n",
    "    merged_summaries = tf.summary.merge_all()\n",
    "    # Create writer and add graph\n",
    "    train_writer = tf.summary.FileWriter(os.path.join(log_path, 'multi_'+ model_type.lower() + '_train/'), sess.graph)\n",
    "    valid_writer = tf.summary.FileWriter(os.path.join(log_path, 'multi_'+ model_type.lower() + '_valid/'))\n",
    "    \n",
    "    coord = tf.train.Coordinator()\n",
    "    threads = tf.train.start_queue_runners(sess,coord)\n",
    "    \n",
    "    init = tf.global_variables_initializer()\n",
    "    sess.run(init)\n",
    "\n",
    "    # start the wall-clock timer\n",
    "    start_time = time.perf_counter()\n",
    "    \n",
    "#     try:\n",
    "#     for i in range(1,epoch_num+1):  # per-epoch loop\n",
    "    for i in range(1,iterations+1):  # one full-batch update per iteration\n",
    "            # Training step\n",
    "#             training_loss = 0\n",
    "#             validation_loss = 0\n",
    "        _, summary, step_loss = sess.run([train, merged_summaries, loss],\n",
    "                                             feed_dict={X: trainX, Y: trainY, keep_prob: 1})\n",
    "                # Validate step\n",
    "        valid_str, val_loss = sess.run([merged_summaries, loss],\n",
    "                                           feed_dict={X: testX, Y: testY, keep_prob: 1})\n",
    "        if (i % 10) == 0:\n",
    "            print(\"[step: {}] train_loss: {:.4f},val_loss:{:.4f}\".format(i, step_loss, val_loss))\n",
    "        # NOTE(review): with i running 1..iterations, this fires at\n",
    "        # i = iterations-1 and labels it i+1, so the \"final\" line prints one\n",
    "        # step early — confirm whether `i == iterations` was intended.\n",
    "        if i == (iterations - 1):\n",
    "            print(\"[step: {}] train_loss: {:.4f},val_loss:{:.4f}\".format(i + 1, step_loss, val_loss))\n",
    "\n",
    "        train_writer.add_summary(summary, global_step=i)\n",
    "        valid_writer.add_summary(valid_str, global_step=i)\n",
    "#             for j in range(batch_total):\n",
    "# #                 b_trainX,b_trainY = sess.run([batch_trainX,batch_trainY])\n",
    "# #                 b_testX,b_testY = sess.run([batch_testX,batch_testY])\n",
    "#                 # _, summary, step_loss = sess.run([train, merged_summaries, loss], feed_dict={X: trainX, Y: trainY, keep_prob: keep_prob, batch_size:batch_size})\n",
    "#                 _, summary, step_loss = sess.run([train, merged_summaries, loss],\n",
    "#                                              feed_dict={X: b_trainX, Y: b_trainY, keep_prob: 1})\n",
    "#                 # Validate step\n",
    "#                 valid_str, val_loss = sess.run([merged_summaries, loss],\n",
    "#                                            feed_dict={X: b_testX, Y: b_testY, keep_prob: 1})\n",
    "                \n",
    "#                 training_loss = step_loss + training_loss\n",
    "#                 validation_loss = val_loss + validation_loss\n",
    "             \n",
    "#     except tf.errors.OutOfRangeError:\n",
    "#         print(\"done\")\n",
    "#     finally:\n",
    "#         coord.request_stop()\n",
    "#     coord.join(threads)\n",
    "            \n",
    "    \n",
    "    end_time = time.perf_counter()\n",
    "    elapsed = (end_time - start_time)\n",
    "    print(\"Time used:\", elapsed)\n",
    "    \n",
    "    # Test step\n",
    "    test_predict = sess.run(Y_pred, feed_dict={X: testX, keep_prob: 1})\n",
    "    rmse_val = sess.run(rmse, feed_dict={targets: testY, Predictions: test_predict})\n",
    "    print(\"RMSE:{}\".format(rmse_val))\n",
    "\n",
    "    # After training, persist the model with saver.save\n",
    "    saver.save(sess, model_file)  # creates the checkpoint files if they do not exist\n",
    "\n",
    "    # print(test_predict)\n",
    "    # Plot predictions\n",
    "#     plt.subplot(331)\n",
    "#     plt.plot(testY, label=\"Reference\")\n",
    "#     plt.plot(test_predict, label=\"Pred\")\n",
    "# #     plt.xlabel(\"DEPTH\")\n",
    "#     #     plt.xlim(5661,5724.5)\n",
    "#     plt.ylabel(\"element_name\")\n",
    "#     plt.legend(loc='best')\n",
    "    \n",
    "    \n",
    "#     plt.subplot(332)\n",
    "    plt.scatter(testY,test_predict,color=\"blue\")\n",
    "    plt.grid(True)  # show grid lines\n",
    "    plt.xlabel(\"testY-Reference\")\n",
    "    plt.ylabel(\"test_predict\")\n",
    "    \n",
    "\n",
    "#     plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + '.jpg', dpi=220,\n",
    "#                     bbox_inches='tight')\n",
    "    plt.show()\n",
    "    plt.clf()\n",
    "    plt.close()\n",
    "\n",
    "sess.close()\n",
    "tf.reset_default_graph()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型对标定的训练数据的验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sess_val = tf.Session(graph=train_graph)\n",
    "# Create a saver by importing the saved meta graph\n",
    "# NOTE(review): import_meta_graph loads into the current *default* graph —\n",
    "# confirm that matches train_graph, which sess_val is bound to\n",
    "saver = tf.train.import_meta_graph(model_file + '.meta')\n",
    "saver.restore(sess_val, tf.train.latest_checkpoint(model_path))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_predict = sess_val.run(Y_pred, feed_dict={X: testX, keep_prob: 1})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型对标定的验证集进行预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize=(20,6))\n",
    "plt.plot(testY, label=\"Reference\")\n",
    "plt.plot(test_predict, label=\"Pred\")\n",
    "plt.grid(True)  # show grid lines\n",
    "plt.xlabel(\"testY-Reference\")\n",
    "plt.ylabel(\"test_predict\")\n",
    "plt.title(element_name + \" on Val Data\")\n",
    "plt.legend(loc='best')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(8,8))\n",
    "plt.scatter(testY,test_predict)\n",
    "plt.grid(True)  # show grid lines\n",
    "plt.xlabel(\"testY-TrueGround\")\n",
    "plt.ylabel(\"test_predict\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型对所有标定的训练数据的预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "type(dataX)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "val_X = np.array(dataX)\n",
    "val_Y = np.array(dataY)\n",
    "true_val_Y = np.array(dataY_GT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "val_Y_pred = sess_val.run(Y_pred, feed_dict={X: val_X, keep_prob: 1})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# val_Y_pred"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 对训练数据验证与实际标定结果对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "depth_log = AB_use.loc[:, [\"深度\"]]\n",
    "depth_log = np.array(depth_log)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "DEPTH_ALL = sen.build_addReslution_DEPTH(depth_log, seq_length)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "val_Y.shape,val_Y_pred.shape,true_val_Y.shape,DEPTH_ALL.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 分析预测的三点数据与实际数据的相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import statsmodels.api as sm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 模型预测数据结果与计算数据相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(10,10))\n",
    "plt.title(\"element_log_contrast—\" + model_type.lower())\n",
    "# plt.plot(val_Y_pred,DEPTH_ALL,color=\"red\", label=\"ALL_pred\")\n",
    "# plt.plot(val_Y,DEPTH_ALL,color=\"orange\", label=\"Reference\")\n",
    "plt.scatter(val_Y_pred,true_val_Y,color=\"blue\")\n",
    "plt.grid(True)  # show grid lines\n",
    "plt.xlabel(\"预测\" + element_name)\n",
    "plt.ylabel(\"计算\" + Reference_name)\n",
    "# plt.legend(loc='best')\n",
    "# plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + 'ValAll.jpg', dpi=220,  bbox_inches='tight')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# val_Y_pred= sm.add_constant(val_Y_pred) # adding a constant\n",
    "ols0 = sm.OLS(true_val_Y, val_Y_pred).fit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ols0.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 模型预测数据结果与实测数据相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(10,10))\n",
    "plt.title(\"element_log_contrast—\" + model_type.lower())\n",
    "# plt.plot(val_Y_pred,DEPTH_ALL,color=\"red\", label=\"ALL_pred\")\n",
    "# plt.plot(val_Y,DEPTH_ALL,color=\"orange\", label=\"Reference\")\n",
    "plt.scatter(val_Y_pred,val_Y,color=\"blue\")\n",
    "plt.grid(True)  # show grid lines\n",
    "\n",
    "plt.xlabel(\"预测\" + element_name)\n",
    "plt.ylabel(\"实测\" +element_name)\n",
    "# plt.legend(loc='best')\n",
    "# plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + 'ValAll.jpg', dpi=220,  bbox_inches='tight')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# val_Y_pred= sm.add_constant(val_Y_pred) # adding a constant\n",
    "ols = sm.OLS(val_Y, val_Y_pred).fit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ols.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf2",
   "language": "python",
   "name": "psp"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "298.075px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
