{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "## Lesson 03 - Final Part"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Part-01 实现拓扑排序"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy adjacency-list graph used to prototype the topological sort below.\n",
    "# (Removed the stray `from sklearn.tree._criterion import MSE` import: it\n",
    "# pulled in a private sklearn class unrelated to this notebook's MSE loss,\n",
    "# and the later cell that referenced MSE failed with NameError anyway.)\n",
    "simple_graph = {\n",
    "    'a': [1, 2],\n",
    "    'b': [2, 3]\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import reduce"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "list_a = [1, 2, 3]\n",
    "\n",
    "list_b = [2, 3, 4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": "{4}"
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "set(list_b) - set(list_a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": "[1, 2, 2, 3]"
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reduce(lambda a, b: a + b, list(simple_graph.values()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def topologic(graph):\n",
    "    \"\"\"Return one valid topological ordering of the nodes in `graph`.\n",
    "\n",
    "    graph: dict mapping each node to the list of its successors, e.g.\n",
    "    {\n",
    "        x: [linear],\n",
    "        k: [linear],\n",
    "        b: [linear],\n",
    "        linear: [sigmoid],\n",
    "        sigmoid: [loss],\n",
    "        y: [loss],\n",
    "    }\n",
    "\n",
    "    Raises TypeError if the graph contains a cycle.\n",
    "    \"\"\"\n",
    "    # Work on a shallow copy so the caller's dict (and its lists) survive;\n",
    "    # the original version emptied the caller's graph as a side effect.\n",
    "    graph = {node: list(successors) for node, successors in graph.items()}\n",
    "\n",
    "    sorted_node = []\n",
    "\n",
    "    while graph:\n",
    "        all_nodes_have_inputs = reduce(lambda a, b: a + b, list(graph.values()))\n",
    "        all_nodes_have_outputs = list(graph.keys())\n",
    "\n",
    "        # Source nodes appear as keys but never as anyone's successor.\n",
    "        source_nodes = set(all_nodes_have_outputs) - set(all_nodes_have_inputs)\n",
    "\n",
    "        if not source_nodes:\n",
    "            raise TypeError('this graph has circle, which cannot get topological order')\n",
    "\n",
    "        node = random.choice(list(source_nodes))\n",
    "        sorted_node.append(node)\n",
    "\n",
    "        # When a single entry remains, its successors are the sink nodes;\n",
    "        # append them so they are not lost when the key is popped.\n",
    "        if len(graph) == 1:\n",
    "            sorted_node += graph[node]\n",
    "\n",
    "        graph.pop(node)\n",
    "\n",
    "        # Drop the chosen node from every remaining successor list.\n",
    "        for links in graph.values():\n",
    "            if node in links:\n",
    "                links.remove(node)\n",
    "\n",
    "    return sorted_node"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "x, k, b, linear, sigmoid, y, loss = 'x', 'k', 'b', 'linear', 'sigmoid', 'y', 'loss'\n",
    "\n",
    "test_graph =   {\n",
    "        x: [linear],\n",
    "        k: [linear],\n",
    "        b: [linear],\n",
    "        linear: [sigmoid],\n",
    "        sigmoid: [loss],\n",
    "        y: [loss],       \n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": "{'x': ['linear'],\n 'k': ['linear'],\n 'b': ['linear'],\n 'linear': ['sigmoid'],\n 'sigmoid': ['loss'],\n 'y': ['loss']}"
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": "['y', 'b', 'x', 'k', 'linear', 'sigmoid', 'loss']"
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "topologic(test_graph)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 一起来见证拓扑排序的威力吧！！！"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "$\\partial$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Node:\n",
    "    \"\"\"Base class for a node in the computation graph.\n",
    "\n",
    "    Each node stores its input nodes and registers itself as a consumer\n",
    "    (output) of each of them, so the graph can be walked in both directions.\n",
    "    \"\"\"\n",
    "    def __init__(self, inputs=None, name=None, is_trainable=False):\n",
    "        # Use None instead of a mutable default `[]`: a shared default list\n",
    "        # would be the same object across every Node instance.\n",
    "        self.inputs = inputs if inputs is not None else []\n",
    "        self.outputs = []\n",
    "        self.name = name\n",
    "        self.value = None\n",
    "        self.gradients = dict()  # partial derivative of the loss w.r.t. each relevant node\n",
    "        self.is_trainable = is_trainable\n",
    "\n",
    "        for node in self.inputs:\n",
    "            node.outputs.append(self)\n",
    "\n",
    "    def forward(self):\n",
    "        pass\n",
    "\n",
    "    def backward(self):\n",
    "        pass\n",
    "\n",
    "    def __repr__(self):\n",
    "        return 'Node: {}'.format(self.name)\n",
    "\n",
    "\n",
    "class Placeholder(Node):\n",
    "    \"\"\"Leaf node whose value is assigned externally (from the feed_dict).\"\"\"\n",
    "    def __init__(self, name=None, is_trainable=False):\n",
    "        Node.__init__(self, name=name, is_trainable=is_trainable)\n",
    "\n",
    "    def backward(self):\n",
    "        # A placeholder has a single consumer in this model; copy the\n",
    "        # gradient that consumer computed for it.\n",
    "        self.gradients[self] = self.outputs[0].gradients[self]\n",
    "\n",
    "    def __repr__(self):\n",
    "        return 'Placeholder: {}'.format(self.name)\n",
    "\n",
    "\n",
    "class Linear(Node):\n",
    "    \"\"\"Affine node: value = k * x + b.\"\"\"\n",
    "    def __init__(self, x, k, b, name=None):\n",
    "        Node.__init__(self, inputs=[x, k, b], name=name)\n",
    "\n",
    "    def forward(self):\n",
    "        x, k, b = self.inputs\n",
    "        self.value = k.value * x.value + b.value\n",
    "\n",
    "    def backward(self):\n",
    "        x, k, b = self.inputs\n",
    "        grad = self.outputs[0].gradients[self]\n",
    "        self.gradients[x] = grad * k.value  # d(kx+b)/dx = k\n",
    "        self.gradients[k] = grad * x.value  # d(kx+b)/dk = x\n",
    "        self.gradients[b] = grad * 1        # d(kx+b)/db = 1\n",
    "\n",
    "    def __repr__(self):\n",
    "        return 'Linear: {}'.format(self.name)\n",
    "\n",
    "\n",
    "class Sigmoid(Node):\n",
    "    \"\"\"Sigmoid activation: value = 1 / (1 + exp(-x)).\"\"\"\n",
    "    def __init__(self, x, name=None):\n",
    "        Node.__init__(self, inputs=[x], name=name)\n",
    "\n",
    "    def _sigmoid(self, x):\n",
    "        return 1 / (1 + np.exp(-x))\n",
    "\n",
    "    def forward(self):\n",
    "        x = self.inputs[0]\n",
    "        self.value = self._sigmoid(x.value)\n",
    "\n",
    "    def backward(self):\n",
    "        x = self.inputs[0]\n",
    "        s = self._sigmoid(x.value)  # sigmoid'(x) = s * (1 - s)\n",
    "        self.gradients[self.inputs[0]] = self.outputs[0].gradients[self] * s * (1 - s)\n",
    "\n",
    "    def __repr__(self):\n",
    "        # Fixed copy-paste bug: this previously reported 'Linear: ...'.\n",
    "        return 'Sigmoid: {}'.format(self.name)\n",
    "\n",
    "\n",
    "class Loss(Node):\n",
    "    \"\"\"Mean squared error between y and yhat.\"\"\"\n",
    "    def __init__(self, y, yhat, name=None):\n",
    "        Node.__init__(self, inputs=[y, yhat], name=name)\n",
    "\n",
    "    def forward(self):\n",
    "        y, yhat = self.inputs\n",
    "        self.value = np.mean((y.value - yhat.value)**2)\n",
    "\n",
    "    def backward(self):\n",
    "        y, yhat = self.inputs\n",
    "        self.gradients[y] = 2 * np.mean(y.value - yhat.value)\n",
    "        self.gradients[yhat] = -2 * np.mean(y.value - yhat.value)\n",
    "\n",
    "    def __repr__(self):\n",
    "        return 'Loss: {}'.format(self.name)\n",
    "\n",
    "\n",
    "# Build the simple model: loss = MSE(y, sigmoid(k * x + b))\n",
    "node_x = Placeholder(name='x')\n",
    "node_y = Placeholder(name='y')\n",
    "\n",
    "node_k = Placeholder(name='k', is_trainable=True)\n",
    "node_b = Placeholder(name='b', is_trainable=True)\n",
    "\n",
    "node_linear = Linear(node_x, node_k, node_b, name='linear')\n",
    "node_sigmoid = Sigmoid(x=node_linear, name='sigmoid')\n",
    "node_loss = Loss(yhat=node_sigmoid, y=node_y, name='loss')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "feed_dict = {\n",
    "    node_x: 3,\n",
    "    node_y: random.random(),\n",
    "    node_k: random.random(),\n",
    "    node_b: 0.38\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "def convert_feed_dict_to_graph(feed_dict):\n",
    "    \"\"\"Expand the nodes in feed_dict into a {node: [successor nodes]} map,\n",
    "    assigning each Placeholder its fed value along the way.\n",
    "    \"\"\"\n",
    "    frontier = list(feed_dict)\n",
    "\n",
    "    computing_graph = defaultdict(list)\n",
    "\n",
    "    while frontier:\n",
    "        current = frontier.pop(0)\n",
    "\n",
    "        # Already expanded -- skip so the walk terminates.\n",
    "        if current in computing_graph: continue\n",
    "\n",
    "        if isinstance(current, Placeholder): current.value = feed_dict[current]\n",
    "\n",
    "        for downstream in current.outputs:\n",
    "            computing_graph[current].append(downstream)\n",
    "            frontier.append(downstream)\n",
    "\n",
    "    return computing_graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "aaa=convert_feed_dict_to_graph(feed_dict)\n",
    "sorted_nodes = topologic(aaa)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "defaultdict(<class 'list'>, {})\n"
     ]
    },
    {
     "data": {
      "text/plain": "[Placeholer: y,\n Placeholer: b,\n Placeholer: x,\n Placeholer: k,\n Linear: linear,\n Linear: sigmoid,\n Loss: loss]"
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print(aaa)\n",
    "sorted_nodes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'MSE' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "\u001B[1;32m<ipython-input-18-766e096571a7>\u001B[0m in \u001B[0;36m<module>\u001B[1;34m\u001B[0m\n\u001B[0;32m     30\u001B[0m \u001B[0ms1\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mSigmoid\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0ml1\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     31\u001B[0m \u001B[0ml2\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mLinear\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0ms1\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mW2\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mb2\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m---> 32\u001B[1;33m \u001B[0mcost\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mMSE\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0my\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0ml2\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m     33\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     34\u001B[0m feed_dict = {\n",
      "\u001B[1;31mNameError\u001B[0m: name 'MSE' is not defined"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.datasets import load_boston\n",
    "from sklearn.utils import shuffle, resample\n",
    "#from miniflow import *\n",
    "\n",
    "# Load data\n",
    "data = load_boston()\n",
    "dataframe = pd.DataFrame(data['data'])\n",
    "dataframe.columns = data['feature_names']\n",
    "\n",
    "X_ = dataframe[['RM', 'LSTAT']]\n",
    "y_ = data['target']\n",
    "\n",
    "# Normalize data\n",
    "X_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)\n",
    "\n",
    "n_features = X_.shape[1]\n",
    "n_hidden = 10\n",
    "W1_ = np.random.randn(n_features, n_hidden)\n",
    "b1_ = np.zeros(n_hidden)\n",
    "W2_ = np.random.randn(n_hidden, 1)\n",
    "b2_ = np.zeros(1)\n",
    "\n",
    "# Neural network\n",
    "X, y = Placeholder(), Placeholder()\n",
    "W1, b1 = Placeholder(), Placeholder()\n",
    "W2, b2 = Placeholder(), Placeholder()\n",
    "\n",
    "l1 = Linear(X, W1, b1)\n",
    "s1 = Sigmoid(l1)\n",
    "l2 = Linear(s1, W2, b2)\n",
    "cost = MSE(y, l2)\n",
    "\n",
    "feed_dict = {\n",
    "    X: X_,\n",
    "    y: y_,\n",
    "    W1: W1_,\n",
    "    b1: b1_,\n",
    "    W2: W2_,\n",
    "    b2: b2_\n",
    "}\n",
    "\n",
    "epochs = 200\n",
    "# Total number of examples\n",
    "m = X_.shape[0]\n",
    "batch_size = 1\n",
    "steps_per_epoch = m // batch_size\n",
    "\n",
    "graph = topological_sort_feed_dict(feed_dict)\n",
    "trainables = [W1, b1, W2, b2]\n",
    "\n",
    "print(\"Total number of examples = {}\".format(m))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm_notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "losses = []\n",
    "\n",
    "\n",
    "for i in tqdm_notebook(range(epochs)):\n",
    "    loss = 0\n",
    "    for j in range(steps_per_epoch):\n",
    "        # Step 1\n",
    "        # Randomly sample a batch of examples\n",
    "        X_batch, y_batch = resample(X_, y_, n_samples=batch_size)\n",
    "\n",
    "        # Reset value of X and y Inputs\n",
    "        X.value = X_batch\n",
    "        y.value = y_batch\n",
    "\n",
    "        # Step 2\n",
    "        _ = None\n",
    "        forward_and_backward(graph) # set output node not important.\n",
    "\n",
    "        # Step 3\n",
    "        rate = 1e-2\n",
    "    \n",
    "        optimize(trainables, rate)\n",
    "\n",
    "        loss += graph[-1].value\n",
    "    \n",
    "    if i % 100 == 0: \n",
    "        print(\"Epoch: {}, Loss: {:.3f}\".format(i+1, loss/steps_per_epoch))\n",
    "        losses.append(loss/steps_per_epoch)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 效果是很好啦~ 但是，我们想知道到底拟合出来了什么函数。怎么办呢？？"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 把维度降低成三维空间"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from mpl_toolkits.mplot3d import Axes3D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X in graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "predicate_results = []\n",
    "for rm, ls in X_.values:\n",
    "    X.value = np.array([[rm, ls]])\n",
    "    forward_and_backward(graph)\n",
    "    predicate_results.append(graph[-2].value[0][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "predicate_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "fig = plt.figure(figsize=(10, 10))\n",
    "ax = fig.add_subplot(111, projection='3d')\n",
    "\n",
    "X_ = dataframe[['RM', 'LSTAT']].values[:, 0]\n",
    "Y_ = dataframe[['RM', 'LSTAT']].values[:, 1]\n",
    "\n",
    "Z = predicate_results\n",
    "\n",
    "rm_and_lstp_price = ax.plot_trisurf(X_, Y_, Z, color='green')\n",
    "\n",
    "ax.set_xlabel('RM')\n",
    "ax.set_ylabel('% of lower state')\n",
    "ax.set_zlabel('Predicated-Price')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## How to distribute your code on the internet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## send-email, 稍微介绍一下自己，需要源代码~\n",
    "\n",
    "## minchiuan.gao@gmail.com "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "pycharm-a2eace9b",
   "language": "python",
   "display_name": "PyCharm (neuralnrtwork)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}