{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.1.0\n",
      "sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)\n",
      "matplotlib 3.1.3\n",
      "numpy 1.18.1\n",
      "pandas 1.0.1\n",
      "sklearn 0.22.1\n",
      "tensorflow 2.1.0\n",
      "tensorflow_core.python.keras.api._v2.keras 2.2.4-tf\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'\\n再模型中：自己实现梯度下降\\n'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Environment check: import the stack and print the versions of the\n",
     "# core libraries used throughout this notebook.\n",
     "import matplotlib as mpl\n",
     "import matplotlib.pyplot as plt\n",
     "%matplotlib inline\n",
     "import numpy as np\n",
     "import sklearn\n",
     "import pandas as pd\n",
     "import os\n",
     "import sys\n",
     "import time\n",
     "import tensorflow as tf\n",
     "\n",
     "from tensorflow import keras\n",
     "\n",
     "print(tf.__version__)\n",
     "print(sys.version_info)\n",
     "for module in mpl, np, pd, sklearn, tf, keras:\n",
     "    print(module.__name__, module.__version__)\n",
     "\n",
     "# NOTE: the string below is kept verbatim — it is this cell's displayed\n",
     "# output (an execute_result), not a docstring. It summarizes the APIs\n",
     "# demonstrated here: keras.losses.mean_squared_error (needs tf.squeeze\n",
     "# on the (batch_size, 1) model output), keras.optimizers.SGD for the\n",
     "# gradient step, and keras.metrics.MeanSquaredError for accumulation.\n",
     "'''\n",
     "自己实现模型训练过程，遇到的功能：\n",
     "1. keras.losses.mean_squared_error 计算损失值(计算均方差)\n",
     "    注意数据shape格式，模型的返回结果是（batch_size,1）,为了计算损失值， 要 y_pred = tf.squeeze(y_pred, 1)，进行降维处理\n",
     "2. optimizer = keras.optimizers.SGD() 创建按优化器，对变量进行梯度下降\n",
     "3. metric = keras.metrics.MeanSquaredError()创建metric对象，用来累计计算损失函数\n",
     "\n",
     "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ".. _california_housing_dataset:\n",
      "\n",
      "California Housing dataset\n",
      "--------------------------\n",
      "\n",
      "**Data Set Characteristics:**\n",
      "\n",
      "    :Number of Instances: 20640\n",
      "\n",
      "    :Number of Attributes: 8 numeric, predictive attributes and the target\n",
      "\n",
      "    :Attribute Information:\n",
      "        - MedInc        median income in block\n",
      "        - HouseAge      median house age in block\n",
      "        - AveRooms      average number of rooms\n",
      "        - AveBedrms     average number of bedrooms\n",
      "        - Population    block population\n",
      "        - AveOccup      average house occupancy\n",
      "        - Latitude      house block latitude\n",
      "        - Longitude     house block longitude\n",
      "\n",
      "    :Missing Attribute Values: None\n",
      "\n",
      "This dataset was obtained from the StatLib repository.\n",
      "http://lib.stat.cmu.edu/datasets/\n",
      "\n",
      "The target variable is the median house value for California districts.\n",
      "\n",
      "This dataset was derived from the 1990 U.S. census, using one row per census\n",
      "block group. A block group is the smallest geographical unit for which the U.S.\n",
      "Census Bureau publishes sample data (a block group typically has a population\n",
      "of 600 to 3,000 people).\n",
      "\n",
      "It can be downloaded/loaded using the\n",
      ":func:`sklearn.datasets.fetch_california_housing` function.\n",
      "\n",
      ".. topic:: References\n",
      "\n",
      "    - Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,\n",
      "      Statistics and Probability Letters, 33 (1997) 291-297\n",
      "\n",
      "(20640, 8)\n",
      "(20640,)\n"
     ]
    }
   ],
   "source": [
     "from sklearn.datasets import fetch_california_housing\n",
     "\n",
     "# Load the California housing regression dataset:\n",
     "# 20640 samples, 8 numeric features; target is the median house value\n",
     "# per district (1990 U.S. census — see DESCR printed below).\n",
     "housing = fetch_california_housing()\n",
     "print(housing.DESCR)\n",
     "print(housing.data.shape)\n",
     "print(housing.target.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(11610, 8) (11610,)\n",
      "(3870, 8) (3870,)\n",
      "(5160, 8) (5160,)\n"
     ]
    }
   ],
   "source": [
     "from sklearn.model_selection import train_test_split\n",
     "\n",
     "# Two-stage split: first carve out the test set, then split the\n",
     "# remainder into train/validation. Fixed random_state values make the\n",
     "# split reproducible across re-runs.\n",
     "x_train_all, x_test, y_train_all, y_test = train_test_split(\n",
     "    housing.data, housing.target, random_state = 7)\n",
     "x_train, x_valid, y_train, y_valid = train_test_split(\n",
     "    x_train_all, y_train_all, random_state = 11)\n",
     "print(x_train.shape, y_train.shape)\n",
     "print(x_valid.shape, y_valid.shape)\n",
     "print(x_test.shape, y_test.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "from sklearn.preprocessing import StandardScaler\n",
     "\n",
     "# Standardize features to zero mean / unit variance.\n",
     "# The scaler is fitted on the training set only; validation and test\n",
     "# reuse those statistics to avoid data leakage.\n",
     "scaler = StandardScaler()\n",
     "x_train_scaled = scaler.fit_transform(x_train)\n",
     "x_valid_scaled = scaler.transform(x_valid)\n",
     "x_test_scaled = scaler.transform(x_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(9.0, shape=(), dtype=float32)\n",
      "tf.Tensor(5.0, shape=(), dtype=float32)\n",
      "tf.Tensor(5.0, shape=(), dtype=float32)\n",
      "tf.Tensor(4.0, shape=(), dtype=float32)\n"
     ]
    }
   ],
   "source": [
     "# Demo of the stateful keras MeanSquaredError metric.\n",
     "# Each call folds a new batch into a running (cumulative) mean squared\n",
     "# error, rather than reporting an independent per-batch value.\n",
     "\n",
     "\n",
     "metric = keras.metrics.MeanSquaredError()\n",
     "print(metric([5.], [2.]))   # (5-2)^2 = 9 -> running mean is 9\n",
     "print(metric([0.], [1.]))   # mean of 9 and (0-1)^2 = 1 -> 5\n",
     "print(metric.result())      # current accumulated value: still 5\n",
     "\n",
     "# reset_states() clears the accumulated state for a fresh start.\n",
     "metric.reset_states()\n",
     "metric([1.], [3.])\n",
     "print(metric.result())      # fresh accumulation: (1-3)^2 = 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_3\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_6 (Dense)              (None, 30)                270       \n",
      "_________________________________________________________________\n",
      "dense_7 (Dense)              (None, 1)                 31        \n",
      "=================================================================\n",
      "Total params: 301\n",
      "Trainable params: 301\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# 1. batch 遍历训练集 metric\n",
    "#    1.1 自动求导\n",
    "# 2. epoch结束 验证集 metric\n",
    "\n",
    "## 手动实现模型求导，梯度下降\n",
    "## 指定模型遍历次数\n",
    "epochs = 100\n",
    "\n",
    "batch_size = 32\n",
    "\n",
    "steps_per_epoch = len(x_train_scaled) // batch_size\n",
    "\n",
    "## 创建optimizer对象\n",
    "optimizer = keras.optimizers.SGD()\n",
    "\n",
    "## 创建metric对象，用来累计计算损失函数\n",
    "metric = keras.metrics.MeanSquaredError()\n",
    "\n",
    "\n",
    "## 从总数据集中获取一个batch_size的内容\n",
    "def random_batch(x, y, batch_size=32):\n",
    "    idx = np.random.randint(0, len(x), size=batch_size)\n",
    "    return x[idx], y[idx]\n",
    "\n",
    "\n",
    "## 创建模型对象\n",
    "model = keras.models.Sequential([\n",
    "    keras.layers.Dense(30, activation='relu',\n",
    "                       ## 指定模型输入的数据格式，使得能够提前提前计算出模型训练参数\n",
    "                   input_shape=x_train.shape[1:]),\n",
    "    keras.layers.Dense(1),\n",
    "])\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1, 2)\n",
      "(3, 4)\n",
      "(5, 6)\n",
      "(7, 8)\n"
     ]
    }
   ],
   "source": [
    "a=[1,3,5,7]\n",
    "b=[2,4,6,8]\n",
    "for item in zip(a,b):\n",
    "    print(item)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Layer dense_18 is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2.  The layer has dtype float32 because it's dtype defaults to floatx.\n",
      "\n",
      "If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n",
      "\n",
      "To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n",
      "\n",
      "Epoch 0  train mse: 0.77979445\t valid mse:  0.5342516347075751e: 1.5549242 0  train mse: 0.9433953  train mse: 0.8690327 train mse: 0.79126763\n",
      "Epoch 1  train mse: 1.73971725 0.6189977\t valid mse:  0.6247201783350896\n",
      "Epoch 2  train mse: 0.43060863\t valid mse:  0.45813503628747887348 2  train mse: 0.4320744\n",
      "Epoch 3  train mse: 0.38426176\t valid mse:  0.3924865809614263n mse: 0.381796663  train mse: 0.38603294\n",
      "Epoch 4  train mse: 0.36647698\t valid mse:  0.38190800144052645 mse: 0.3545125 4  train mse: 0.36659408 4  train mse: 0.3709987\n",
      "Epoch 5  train mse: 0.3608914\t valid mse:  0.3750463593464562ain mse: 0.38375252 0.36470184\n",
      "Epoch 6  train mse: 0.35931957\t valid mse:  0.3698337288053999\n",
      "Epoch 7  train mse: 0.35546628\t valid mse:  0.4932347605252295n mse: 0.357785  train mse: 0.35616645\n",
      "Epoch 8  train mse: 0.37204683\t valid mse:  0.36766320048330153\n",
      "Epoch 9  train mse: 0.35640496\t valid mse:  0.3723468411318954\n"
     ]
    }
   ],
   "source": [
     "# Manual training loop (instead of model.fit):\n",
     "# 1. per batch: sample data, forward pass, loss, gradients, SGD update,\n",
     "#    and fold the batch into the running train-MSE metric\n",
     "# 2. per epoch: evaluate MSE on the validation set\n",
     "\n",
     "## Number of epochs to run\n",
     "epochs = 10\n",
     "\n",
     "batch_size = 32\n",
     "\n",
     "steps_per_epoch = len(x_train_scaled) // batch_size\n",
     "\n",
     "## Plain SGD optimizer: applies the computed gradients to the variables\n",
     "optimizer = keras.optimizers.SGD()\n",
     "\n",
     "## Stateful metric: accumulates a running mean of the squared error\n",
     "metric = keras.metrics.MeanSquaredError()\n",
     "\n",
     "\n",
     "## Draw one random batch (sampled with replacement) from the dataset\n",
     "def random_batch(x, y, batch_size=32):\n",
     "    idx = np.random.randint(0, len(x), size=batch_size)\n",
     "    return x[idx], y[idx]\n",
     "\n",
     "\n",
     "## Build the model\n",
     "model = keras.models.Sequential([\n",
     "    keras.layers.Dense(30, activation='relu',\n",
     "                       ## Declaring the input shape lets Keras build the\n",
     "                       ## layers (and their parameters) up front\n",
     "                       input_shape=x_train.shape[1:]),\n",
     "    keras.layers.Dense(1),\n",
     "])\n",
     "\n",
     "## Outer loop: one iteration per epoch\n",
     "for epoch in range(epochs):\n",
     "    ## Clear the metric's accumulated state at the start of each epoch\n",
     "    metric.reset_states()\n",
     "    ## Each step processes one batch and performs one parameter update\n",
     "    for step in range(steps_per_epoch):\n",
     "        ## Sample a batch; y_batch has shape (batch_size,) — a 1-D vector\n",
     "        x_batch, y_batch = random_batch(x_train_scaled, y_train,\n",
     "                                        batch_size)\n",
     "        \n",
     "        ## Record the forward pass so gradients can be taken afterwards\n",
     "        with tf.GradientTape() as tape:\n",
     "            y_pred = model(x_batch)\n",
     "            \n",
     "            ## Model output is (batch_size, 1); squeeze axis 1 down to\n",
     "            ## (batch_size,) so it lines up with y_batch for the loss\n",
     "            y_pred = tf.squeeze(y_pred, 1)\n",
     "            \n",
     "            ## Scalar MSE loss for this batch (both args are 1-D vectors)\n",
     "            loss = keras.losses.mean_squared_error(y_batch, y_pred)\n",
     "            \n",
     "            ## Fold this batch into the running train MSE\n",
     "            metric(y_batch, y_pred)\n",
     "        ## Gradients of the batch loss w.r.t. every model variable\n",
     "        grads = tape.gradient(loss, model.variables)\n",
     "        ## apply_gradients expects (gradient, variable) pairs, so zip the\n",
     "        ## two parallel lists together:\n",
     "        ## zip([1,2,3,4],[4,3,2,1]) -> [(1,4),(2,3),(3,2),(4,1)]\n",
     "        grads_and_vars = zip(grads, model.variables)\n",
     "        \n",
     "        ## One SGD step over all variables\n",
     "        optimizer.apply_gradients(grads_and_vars)\n",
     "        \n",
     "        ## \\r moves the cursor to the start of the line so the running\n",
     "        ## train MSE refreshes in place instead of scrolling\n",
     "        print(\"\\rEpoch\", epoch, \" train mse:\",\n",
     "              metric.result().numpy(), end=\"\")\n",
     "    ## End of epoch: evaluate on the validation set\n",
     "    y_valid_pred = model(x_valid_scaled)\n",
     "    y_valid_pred = tf.squeeze(y_valid_pred, 1)\n",
     "    \n",
     "    ## Argument order is (pred, true) here, swapped vs the training-loop\n",
     "    ## call above — harmless since MSE is symmetric in its arguments\n",
     "    valid_loss = keras.losses.mean_squared_error(y_valid_pred, y_valid)\n",
     "    print(\"\\t\", \"valid mse: \", valid_loss.numpy())\n",
     "        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
