{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-01-03T13:11:18.515950Z",
     "start_time": "2020-01-03T13:11:18.503442Z"
    }
   },
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "new() received an invalid combination of arguments - got (list, requires_grad=bool), but expected one of:\n * (torch.device device)\n * (torch.Storage storage)\n * (Tensor other)\n * (tuple of ints size, torch.device device)\n      didn't match because some of the keywords were incorrect: requires_grad\n * (object data, torch.device device)\n      didn't match because some of the keywords were incorrect: requires_grad\n",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-17-cc66153fb3fd>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     35\u001b[0m '''\n\u001b[1;32m     36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 37\u001b[0;31m \u001b[0mx\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     38\u001b[0m \u001b[0mw1\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0mVariable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     39\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mw1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: new() received an invalid combination of arguments - got (list, requires_grad=bool), but expected one of:\n * (torch.device device)\n * (torch.Storage storage)\n * (Tensor other)\n * (tuple of ints size, torch.device device)\n      didn't match because some of the keywords were incorrect: requires_grad\n * (object data, torch.device device)\n      didn't match because some of the keywords were incorrect: requires_grad\n"
     ]
    }
   ],
   "source": [
    "import numpy\n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "'''\n",
    "    题目1：\n",
    "        用pytorch 实现两层神经网络拟合，不使用model。题目：一个全连接的ReLU神经网络，一个隐藏层，没有bias，用来从x预测y。\n",
    "        两层线性层:\n",
    "            h = W1 * x\n",
    "            h_relu = relu(h)\n",
     "            y = W2 * h_relu\n",
    "        优化方法使用随机梯度下降\n",
    "            w1 = w1 - learning_rate * （w1的导数）\n",
    "            w2 = w2 - learning_rate * (w2的导数)\n",
    "        在pytorch中可以利用：\n",
     "            x = torch.tensor([1.0], requires_grad=True)\n",
     "            w1 = torch.tensor([2.0], requires_grad=True)\n",
    "            y = x * w1\n",
    "            y.backward()\n",
    "            就能使用\n",
     "            x.grad表示y对x的导数（即 dy/dx）\n",
    "            也就是 x.grad = 2.0\n",
    "        使用Loss函数：\n",
    "            MSE = (y - y预测)^2\n",
    "        \n",
    "    Tip:\n",
    "        模型的设计包括三部分：\n",
    "        1. 数据的准备\n",
    "        2. 模型的设置\n",
    "            2.1 模型的激活函数(ReLu)，Loss函数\n",
     "            2.2 优化方法（SGD、ADAM、ADAGRAD）\n",
    "            2.3 反向传播可使用backward\n",
    "        3. 模型的编译（如果是静态模型）和运行\n",
    "    Warning：\n",
    "        本次考核只能使用torch进行，不可以使用其他任何的框架和模型，使用框架或者模型当做0分处理。\n",
    "'''\n",
    "\n",
     "x = torch.tensor([1.0], requires_grad=True)\n",
     "w1 = torch.tensor([2.0], requires_grad=True)\n",
    "y = x * w1\n",
    "y.backward()\n",
    "\n",
    "print(x.grad)\n",
    "            \n",
    "#  上面代码是对于自动求导的样例\n",
    "            \n",
    "EPOCH = 500\n",
    "LEARNING_RATE = 1e-6\n",
    "N , D_in , H , D_out = 64 , 1000, 100 , 10 # N代表的是样本个数，D_in是样本的维度，H是隐藏层的维度,D_out是输出层的维度\n",
    "X = torch.randn(N,D_in)\n",
    "y = torch.randn(N,D_out)\n",
    "W1 = torch.randn(D_in,H,requires_grad=True)\n",
    "W2 = torch.randn(H,D_out,requires_grad=True)\n",
    "\n",
    "# 下面是你要开始写的内容\n",
    "# 请根据公式完成两层模型的搭建."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy\n",
    "\n",
    "'''\n",
    "    题目2：\n",
    "        用numpy 实现两层神经网络。题目：一个全连接的ReLU神经网络，一个隐藏层，没有bias，用来从x预测y。\n",
    "    Tip:\n",
    "        模型的设计包括三部分：\n",
    "        1. 数据的准备\n",
    "        2. 模型的设置\n",
    "            2.1 模型的激活函数(ReLu)，Loss函数\n",
     "            2.2 优化方法（SGD、ADAM、ADAGRAD）\n",
    "            2.3 反向传播\n",
    "        3. 模型的编译（如果是静态模型）和运行\n",
    "    Warning：\n",
    "        本次考核只能使用numpy进行，不可以使用任何的框架，使用框架当做0分处理。\n",
    "    \n",
    "'''\n",
    "EPOCH = 500\n",
    "LEARNING_RATE = 0.01\n",
    "N , D_in , H , D_out = 64 , 1000, 100 , 10 # N代表的是样本个数，D_in是样本的维度，H是隐藏层的维度,D_out是输出层的维度\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 选做题\n",
    "'''\n",
    "    使用cpp 实现两层神经网络，不使用model。题目：一个全连接的ReLU神经网络，一个隐藏层，没有bias，用来从x预测y。\n",
    "    Tip:\n",
    "        模型的设计包括三部分：\n",
    "        1. 数据的准备\n",
    "        2. 模型的设置\n",
    "            2.1 模型的激活函数(sigmoid或者relu)，Loss函数MSE\n",
    "            2.2 优化方法（SGD)\n",
    "            2.3 反向传播不使用backward\n",
    "        3. 模型的编译（如果是静态模型）和运行\n",
    "    Warning：\n",
     "         本次考核只能使用cpp/cuda进行，不可以使用其他任何的框架和模型，使用框架或者模型当做0分处理。\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "import torch\n",
    "\n",
    "'''\n",
    "    题目3：\n",
    "        用pytorch 实现两层神经网络，不使用model。题目：一个全连接的ReLU神经网络，一个隐藏层，没有bias，用来从x预测y。\n",
    "    Tip:\n",
    "        模型的设计包括三部分：\n",
    "        1. 数据的准备\n",
    "        2. 模型的设置\n",
    "            2.1 模型的激活函数(ReLu)，Loss函数\n",
     "            2.2 优化方法（SGD、ADAM、ADAGRAD）\n",
    "            2.3 反向传播不使用backward\n",
    "        3. 模型的编译（如果是静态模型）和运行\n",
    "    Warning：\n",
    "         本次考核只能使用torch进行，不可以使用其他任何的框架和模型，使用框架或者模型当做0分处理。\n",
    "        \n",
    "'''\n",
    "EPOCH = 500\n",
    "LEARNING_RATE = 1e-6\n",
    "N , D_in , H , D_out = 64 , 1000, 100 , 10 # N代表的是样本个数，D_in是样本的维度，H是隐藏层的维度,D_out是输出层的维度\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}