{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import gc\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib import font_manager\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras import layers\n",
    "from tensorflow import keras\n",
    "from scipy import stats\n",
    "from tensorflow.python.ops import math_ops\n",
    "from tensorflow.python.keras import backend as K\n",
    "\n",
    "from tensorflow.python.keras.utils.generic_utils import get_custom_objects\n",
    "from tensorflow.python.keras.layers import Activation\n",
    "from tensorflow.keras.layers import concatenate\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def swish(x):\n",
    "        \"\"\"Swish activation: sigmoid(x) * x.\"\"\"\n",
    "        return (K.sigmoid(x) * x)\n",
    "\n",
    "# Register 'swish' so layers can reference it by name (currently only used by\n",
    "# the commented-out Dense layers below).\n",
    "get_custom_objects().update({'swish': Activation(swish)})\n",
    "\n",
    "# CNN branch 1: 120x120 RGB image input -> 64-d embedding x1.\n",
    "inputs1 = tf.keras.Input(shape=(120, 120, 3))\n",
    "x = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu')(inputs1)\n",
    "x = tf.keras.layers.MaxPool2D()(x)\n",
    "x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)\n",
    "x = tf.keras.layers.MaxPool2D()(x)\n",
    "x = tf.keras.layers.GlobalAveragePooling2D()(x)\n",
    "x = tf.keras.layers.Dense(64, activation='relu')(x)\n",
    "x1 = tf.keras.layers.Dense(64, activation='relu')(x)\n",
    "########################################################\n",
    "# CNN branch 2: identical architecture (separate weights) -> embedding x2.\n",
    "inputs2 = tf.keras.Input(shape=(120, 120, 3))\n",
    "x = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu')(inputs2)\n",
    "x = tf.keras.layers.MaxPool2D()(x)\n",
    "x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)\n",
    "x = tf.keras.layers.MaxPool2D()(x)\n",
    "x = tf.keras.layers.GlobalAveragePooling2D()(x)\n",
    "x = tf.keras.layers.Dense(64, activation='relu')(x)\n",
    "x2 = tf.keras.layers.Dense(64, activation='relu')(x)\n",
    "#################################################\n",
    "merge1 = concatenate([x1, x2])\n",
    "#########################################################################################################\n",
    "\n",
    "# 1-D CNN branch: each sample is a (5013, 2) sequence — presumably the two\n",
    "# interleaved phase signals; confirm against the data-preparation cell.\n",
    "features_inputs = tf.keras.Input((5013,2 ), dtype=tf.float32)\n",
    "\n",
    "## feature ##\n",
    "#     feature_x = layers.Dense(2000, activation='swish')(features_inputs)\n",
    "feature_x = layers.Dropout(0.1)(features_inputs)\n",
    "## convolution 1 ##\n",
    "# feature_x = layers.Reshape((10000,1))(feature_x)\n",
    "## convolution 1 ##\n",
    "feature_x = layers.Conv1D(filters=500, kernel_size=21, strides=5, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "## convolution 2 ##\n",
    "feature_x = layers.Conv1D(filters=500, kernel_size=21, strides=1, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "## With padding='valid' (no padding) the output length is (input_len - pool_size + 1) / strides; with padding='same' it is input_len / strides.\n",
    "feature_x = layers.MaxPool1D(pool_size=2,strides=None,padding='valid')(feature_x)\n",
    "\n",
    "## convolution 3 ##\n",
    "feature_x = layers.Conv1D(filters=1000, kernel_size=5, strides=1, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "## convolution 4 ##\n",
    "feature_x = layers.Conv1D(filters=1000, kernel_size=5, strides=1, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "feature_x = layers.MaxPool1D(pool_size=2,strides=None,padding='valid')(feature_x)\n",
    "\n",
    "## convolution 5 ##\n",
    "feature_x = layers.Conv1D(filters=2000, kernel_size=5, strides=1, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "## convolution 6 ##\n",
    "feature_x = layers.Conv1D(filters=2000, kernel_size=5, strides=1, padding='same', activation=\"relu\")(feature_x)\n",
    "feature_x = layers.BatchNormalization()(feature_x)\n",
    "#     feature_x = layers.LeakyReLU()(feature_x)\n",
    "feature_x = layers.GlobalAveragePooling1D()(feature_x)\n",
    "\n",
    "#     feature_x = layers.Dense(20, activation='swish')(feature_x)\n",
    "\n",
    "## flatten ##\n",
    "# Flatten is a no-op after GlobalAveragePooling1D (already rank 2), kept as-is.\n",
    "feature_x = layers.Flatten()(feature_x)\n",
    "feature_x = layers.Dropout(0.1)(feature_x)\n",
    "#########################################################################################################################   \n",
    "\n",
    "\n",
    "# Fuse the two image embeddings with the 1-D feature branch.\n",
    "merge3 = concatenate([merge1, feature_x])\n",
    "\n",
    "merge3 = layers.Dropout(0.1)(merge3)\n",
    "\n",
    "# Single-value regression head; trained with MSE below.\n",
    "outputs = tf.keras.layers.Dense(1)(merge3)\n",
    "# rmse = keras.metrics.RootMeanSquaredError(name=\"rmse\")\n",
    "model = tf.keras.Model(inputs=[inputs1,inputs2,features_inputs], outputs=outputs)\n",
    "model.compile(optimizer=tf.keras.optimizers.Adam(0.0005), loss='mse', metrics=['mse', \"mae\", \"mape\"])\n",
    "\n",
    "model.summary()\n",
    "keras.utils.plot_model(model, show_shapes=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Final model inputs: phase signal + auxiliary features + recurrence plots of the phase signal.\n",
    "first_phase_oneperson = pd.read_csv('./first_phase_oneperson.csv')\n",
    "second_phase_oneperson = pd.read_csv('./second_phase_oneperson.csv')\n",
    "# BUG FIX: the original line had a stray quote ('./one_bcg.csv''), a SyntaxError.\n",
    "all_bcg_data = pd.read_csv('./one_bcg.csv')\n",
    "label_oneperson = all_bcg_data.iloc[:, 10001:]\n",
    "first_features_oneperson = pd.read_csv('./first_features_oneperson.csv')\n",
    "second_features_oneperson = pd.read_csv('./second_features_oneperson.csv')\n",
    "\n",
    "\n",
    "# Build the two image-path columns (one recurrence plot per sample, 137 samples).\n",
    "path1 = []\n",
    "path2 = []\n",
    "for i in range(137):\n",
    "    path1.append('./img_data/相位上/'+str(i)+'.png')\n",
    "    path2.append('./img_data/相位下/'+str(i)+'.png')\n",
    "filepaths1 = pd.Series(path1, name='Filepath1').astype(str)\n",
    "filepaths2 = pd.Series(path2, name='Filepath2').astype(str)\n",
    "images = pd.concat([filepaths1, filepaths2], axis=1)\n",
    "\n",
    "\n",
    "# One wide frame: signals, features, labels and image paths, column-aligned by row.\n",
    "all_data = pd.concat([first_phase_oneperson,first_features_oneperson,second_phase_oneperson,second_features_oneperson,label_oneperson,images],axis=1)\n",
    "# all_data.reset_index(drop=True)\n",
    "# Shuffle rows at random.\n",
    "# NOTE(review): no np.random.seed is set, so this shuffle is not reproducible.\n",
    "all_data = all_data.reindex(np.random.permutation(all_data.index)).reset_index(drop=True)\n",
    "\n",
    "\n",
    "# Image generators: only rescale pixel values to [0, 1].\n",
    "train_generator = tf.keras.preprocessing.image.ImageDataGenerator(\n",
    "    rescale=1./255,\n",
    "#     validation_split=0.2\n",
    ")\n",
    "test_generator = tf.keras.preprocessing.image.ImageDataGenerator(\n",
    "    rescale=1./255\n",
    ")\n",
    "# shuffle=False keeps image order aligned with the rows of all_data.\n",
    "train_images1 = train_generator.flow_from_dataframe(\n",
    "    dataframe=all_data,\n",
    "    x_col='Filepath1',\n",
    "    y_col='10000',\n",
    "    target_size=(120, 120),\n",
    "    color_mode='rgb',\n",
    "    class_mode='raw',\n",
    "    batch_size=300,\n",
    "    shuffle=False,\n",
    "#     seed=42,\n",
    "#     subset='training'\n",
    ")\n",
    "train_images2 = train_generator.flow_from_dataframe(\n",
    "    dataframe=all_data,\n",
    "    x_col='Filepath2',\n",
    "    y_col='10000',\n",
    "    target_size=(120, 120),\n",
    "    color_mode='rgb',\n",
    "    class_mode='raw',\n",
    "    batch_size=300,\n",
    "    shuffle=False,\n",
    "#     seed=42,\n",
    "#     subset='training'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "features1 = all_data.iloc[:,:5013]\n",
    "features2 = all_data.iloc[:,5013:10026]\n",
    "label_oneperson = all_data.iloc[:,10026:10028]\n",
    "\n",
    "def data_reshape(data1, data2, num, length):\n",
    "    \"\"\"Interleave two (num, length) DataFrames into a (num, length, 2) array.\n",
    "\n",
    "    out[i, j, 0] == data1.values[i, j] and out[i, j, 1] == data2.values[i, j],\n",
    "    identical to the former element-by-element Python double loop but\n",
    "    vectorized with np.stack (the loop made num*length*2 list appends).\n",
    "    num and length are kept for interface compatibility and now validate the\n",
    "    result shape, mirroring the ValueError the old reshape raised on mismatch.\n",
    "    \"\"\"\n",
    "    data = np.stack((data1.values, data2.values), axis=-1)\n",
    "    if data.shape != (num, length, 2):\n",
    "        raise ValueError('expected shape %s, got %s' % ((num, length, 2), data.shape))\n",
    "    return data\n",
    "\n",
    "data1 = data_reshape(features1, features2, 300, 5013)\n",
    "\n",
    "\n",
    "# Single large batch (batch_size=300, shuffle=False upstream) keeps the image\n",
    "# arrays row-aligned with features/labels from all_data.\n",
    "x_train = [train_images1[0][0], train_images2[0][0], data1]\n",
    "\n",
    "y1_train = label_oneperson['10000'].values\n",
    "y2_train = label_oneperson['10001'].values\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tbCallBack = tf.keras.callbacks.TensorBoard(log_dir=\"./logs/my_model1\")\n",
    "history = model.fit(  # run the training loop\n",
    "    x_train, y1_train, # training inputs and their labels\n",
    "    batch_size = 32, # mini-batch size (commonly 16/32/64/128; drop toward 1 only if memory-bound)\n",
    "    epochs = 500, # number of epochs (reduce while smoke-testing)\n",
    "    validation_split = 0.15, # holds out the LAST 15% of the training data for validation (the original comment claiming \"80% from the test set\" was wrong)\n",
    "     callbacks=[\n",
    "        tf.keras.callbacks.EarlyStopping(\n",
    "            monitor='mse', # NOTE(review): this watches the *training* mse; val metrics only exist every validation_freq epochs — confirm this is intended\n",
    "            patience=50,\n",
    "            restore_best_weights=True\n",
    "        )\n",
    "    ],\n",
    "#     callbacks=[tbCallBack],\n",
    "    shuffle=True,\n",
    "    validation_freq = 20) # evaluate on the validation split every 20 epochs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the trained model (HDF5 format).\n",
    "model.save(r'model_data/my_model.h5')\n",
    "# Persist the training history dict with pickle (despite the .txt extension,\n",
    "# this file is binary pickle data, not text).\n",
    "import pickle\n",
    "with open('model_data/my_model.txt', 'wb') as file_txt:\n",
    "    pickle.dump(history.history, file_txt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
