{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "cb9e3f4a",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "[[ 1.1630785   0.4838046   0.29956347  0.15302546 -1.1688148   1.558071\n",
      "  -0.5459446  -2.3556297   0.54144025  2.6785064   1.2546344  -0.54877406\n",
      "  -0.68106437 -0.1353156   0.37723133  0.41016456  0.5712682  -2.7579627\n",
      "   1.07628    -0.6141325 ]\n",
      " [ 1.8307649  -1.1468065   0.05383794 -2.5074806  -0.5916499   0.85860497\n",
      "  -0.22794183  0.20131476  0.3500547   0.5360521   1.5194443   1.9040879\n",
      "  -1.5734432  -0.14007866  0.29670075  1.3111951   0.5035904  -1.189445\n",
      "  -0.55021375 -1.5918757 ]\n",
      " [ 0.15606561 -0.56803983 -0.8062886  -1.200044   -1.1877518  -0.15346648\n",
      "   0.13385749 -0.7531863   1.9670967  -1.3998121   0.43909982 -0.46219134\n",
      "   0.25316575 -2.302824    1.6881202  -0.6523198   0.89039123 -0.27633253\n",
      "   1.9417537   0.7879354 ]\n",
      " [-0.85567254 -0.8239901   0.5187018   0.15364595  1.0113305  -0.42647716\n",
      "   0.6850326   1.174712    1.3677438   1.0384175  -1.7305615   0.6770596\n",
      "  -2.5028148  -0.28589353 -0.36147118 -1.0910612   0.24606876 -1.053679\n",
      "  -0.26470134 -0.63217986]]\n",
      "<NDArray 4x20 @cpu(0)>\n",
      "epoch 1,loss 20011.648438\n",
      "epoch 2,loss 19113.437500\n",
      "epoch 3,loss 19815.238281\n",
      "epoch 4,loss 18910.693359\n",
      "epoch 5,loss 19772.601562\n",
      "epoch 6,loss 18898.609375\n",
      "epoch 7,loss 19079.437500\n",
      "epoch 8,loss 18660.451172\n",
      "epoch 9,loss 18613.408203\n",
      "epoch 10,loss 19416.365234\n",
      "epoch 11,loss 19288.728516\n",
      "epoch 12,loss 17892.203125\n",
      "epoch 13,loss 17724.664062\n",
      "epoch 14,loss 17513.355469\n",
      "epoch 15,loss 17354.490234\n",
      "epoch 16,loss 17147.992188\n",
      "epoch 17,loss 17003.119141\n",
      "epoch 18,loss 16804.691406\n",
      "epoch 19,loss 17166.652344\n",
      "epoch 20,loss 16711.425781\n",
      "epoch 21,loss 16487.960938\n",
      "epoch 22,loss 16753.812500\n",
      "epoch 23,loss 16472.447266\n",
      "epoch 24,loss 17243.267578\n",
      "epoch 25,loss 16842.462891\n",
      "epoch 26,loss 17000.740234\n",
      "epoch 27,loss 16816.289062\n",
      "epoch 28,loss 16181.908203\n",
      "epoch 29,loss 16318.419922\n",
      "epoch 30,loss 16567.914062\n",
      "epoch 31,loss 16162.223633\n",
      "epoch 32,loss 16098.511719\n",
      "epoch 33,loss 16185.218750\n",
      "epoch 34,loss 16227.733398\n",
      "epoch 35,loss 16423.802734\n",
      "epoch 36,loss 16274.695312\n",
      "epoch 37,loss 16161.783203\n",
      "epoch 38,loss 17090.357422\n",
      "epoch 39,loss 16130.405273\n",
      "epoch 40,loss 16391.974609\n",
      "epoch 41,loss 16297.688477\n",
      "epoch 42,loss 15930.355469\n",
      "epoch 43,loss 16509.146484\n",
      "epoch 44,loss 16018.309570\n",
      "epoch 45,loss 15928.495117\n",
      "epoch 46,loss 16147.626953\n",
      "epoch 47,loss 15668.110352\n",
      "epoch 48,loss 15696.325195\n",
      "epoch 49,loss 15795.797852\n",
      "epoch 50,loss 15781.660156\n",
      "epoch 51,loss 15469.740234\n",
      "epoch 52,loss 15358.672852\n",
      "epoch 53,loss 16102.281250\n",
      "epoch 54,loss 15307.775391\n",
      "epoch 55,loss 15416.691406\n",
      "epoch 56,loss 15075.502930\n",
      "epoch 57,loss 15264.389648\n",
      "epoch 58,loss 14750.046875\n",
      "epoch 59,loss 15248.125000\n",
      "epoch 60,loss 15145.394531\n",
      "epoch 61,loss 14809.211914\n",
      "epoch 62,loss 15274.691406\n",
      "epoch 63,loss 14716.883789\n",
      "epoch 64,loss 14931.644531\n",
      "epoch 65,loss 15152.776367\n",
      "epoch 66,loss 15127.849609\n",
      "epoch 67,loss 14998.790039\n",
      "epoch 68,loss 14946.736328\n",
      "epoch 69,loss 14870.089844\n",
      "epoch 70,loss 14899.906250\n",
      "epoch 71,loss 15007.747070\n",
      "epoch 72,loss 14821.208008\n",
      "epoch 73,loss 15726.153320\n",
      "epoch 74,loss 14656.218750\n",
      "epoch 75,loss 14950.537109\n",
      "epoch 76,loss 14635.796875\n",
      "epoch 77,loss 14687.589844\n",
      "epoch 78,loss 14620.756836\n",
      "epoch 79,loss 14842.535156\n",
      "epoch 80,loss 14896.209961\n",
      "epoch 81,loss 15080.834961\n",
      "epoch 82,loss 14928.787109\n",
      "epoch 83,loss 15014.212891\n",
      "epoch 84,loss 14520.792969\n",
      "epoch 85,loss 16562.335938\n",
      "epoch 86,loss 15289.127930\n",
      "epoch 87,loss 14524.183594\n",
      "epoch 88,loss 15038.200195\n",
      "epoch 89,loss 14989.333008\n",
      "epoch 90,loss 14912.561523\n",
      "epoch 91,loss 15060.921875\n",
      "epoch 92,loss 15056.219727\n",
      "epoch 93,loss 14750.403320\n",
      "epoch 94,loss 14401.740234\n",
      "epoch 95,loss 14470.366211\n",
      "epoch 96,loss 14682.622070\n",
      "epoch 97,loss 14983.868164\n",
      "epoch 98,loss 14754.549805\n",
      "epoch 99,loss 14689.480469\n",
      "epoch 100,loss 14578.351562\n"
     ]
    }
   ],
   "source": [
    "# Single-hidden-layer MLP regression trained with mini-batch SGD on\n",
    "# Excel-sourced data, using MXNet NDArray + autograd.\n",
    "%matplotlib inline\n",
    "import d2lzh as d2l\n",
    "import xlrd\n",
    "import random\n",
    "import math\n",
    "from IPython import display\n",
    "from matplotlib import pyplot as plt\n",
    "from mxnet import autograd, nd\n",
    "# Hyperparameters: mini-batch size and layer sizes (4 -> 20 -> 1).\n",
    "batch_size =10\n",
    "num_inputs = 4\n",
    "num_outputs = 1\n",
    "num_hiddens=20\n",
    "\n",
    "\n",
    "# Model parameters: weights drawn from N(0, 1), biases zero.\n",
    "# NOTE(review): no random seed is set, so runs are not reproducible.\n",
    "w = nd.random.normal(scale=1, shape=(num_inputs, num_hiddens))\n",
    "b = nd.zeros(num_hiddens)\n",
    "w1=nd.random.normal(scale=1, shape=(num_hiddens, num_outputs))\n",
    "b1= nd.zeros(num_outputs)\n",
    "\n",
    "# Allocate gradient buffers so autograd can populate param.grad.\n",
    "w.attach_grad()\n",
    "b.attach_grad()\n",
    "w1.attach_grad()\n",
    "b1.attach_grad()\n",
    "\n",
    "params=[w,b,w1,b1]\n",
    "print(w)  # show the initial hidden-layer weights\n",
    "def use_svg_display():\n",
    "    \"\"\"Render matplotlib figures as SVG (vector graphics) in the notebook.\"\"\"\n",
    "    # NOTE(review): display.set_matplotlib_formats is deprecated in recent\n",
    "    # IPython (moved to matplotlib_inline.backend_inline) — confirm version.\n",
    "    display.set_matplotlib_formats('svg')\n",
    "\n",
    "def set_figsize(figsize=(3.5, 2.5)):\n",
    "    \"\"\"Set the default matplotlib figure size (width, height in inches).\"\"\"\n",
    "    use_svg_display()\n",
    "    # Set the figure size\n",
    "    plt.rcParams['figure.figsize'] = figsize\n",
    "\n",
    "def squared_loss(y_hat, y):\n",
    "    \"\"\"Elementwise squared-error loss (y_hat - y)^2 / 2, no reduction.\"\"\"\n",
    "    diff = y_hat - y\n",
    "    return diff * diff / 2\n",
    "\n",
    "def relu(X):\n",
    "    \"\"\"Rectified linear unit: elementwise maximum of X and 0.\"\"\"\n",
    "    return nd.maximum(0, X)\n",
    "\n",
    "def net(X):\n",
    "    \"\"\"Forward pass of the one-hidden-layer MLP (uses globals w, b, w1, b1).\"\"\"\n",
    "    hidden = relu(nd.dot(X, w) + b)\n",
    "    output = nd.dot(hidden, w1) + b1\n",
    "    return output\n",
    "\n",
    "def excel2matrix(path):\n",
    "    \"\"\"Read the first sheet of an .xls workbook into an NDArray.\n",
    "\n",
    "    path: filesystem path to the workbook.\n",
    "    Returns an NDArray of shape (nrows, ncols) with the sheet's cell values.\n",
    "    \"\"\"\n",
    "    data = xlrd.open_workbook(path)\n",
    "    table = data.sheets()[0]\n",
    "    nrows = table.nrows  # number of rows\n",
    "    ncols = table.ncols  # number of columns\n",
    "    # Pre-allocate with zeros: every row is overwritten below, so the\n",
    "    # previous nd.random.normal initialisation was wasteful and misleading.\n",
    "    datamatrix = nd.zeros(shape=(nrows, ncols))\n",
    "    for i in range(nrows):\n",
    "        datamatrix[i, :] = table.row_values(i)\n",
    "    return datamatrix\n",
    " \n",
    "def data_iter(batch_size, features, labels):\n",
    "    \"\"\"Yield (features, labels) mini-batches, visiting samples in random order.\"\"\"\n",
    "    n = len(features)\n",
    "    order = list(range(n))\n",
    "    random.shuffle(order)  # samples are read in a random order\n",
    "    for start in range(0, n, batch_size):\n",
    "        batch_idx = nd.array(order[start: min(start + batch_size, n)])\n",
    "        # take() gathers the rows at the given indices\n",
    "        yield features.take(batch_idx), labels.take(batch_idx)\n",
    "# (Removed dead commented-out classification helpers — cross_entropy,\n",
    "# accuracy, evaluate_accuracy — unused in this regression notebook.)\n",
    "\n",
    "# Training hyperparameters: epochs and SGD learning rate.\n",
    "num_epochs, lr = 100, 0.0001\n",
    "\n",
    "def sgd(params, lr, batch_size):  \n",
    "    \"\"\"Mini-batch SGD step: update each parameter in place from param.grad.\n",
    "\n",
    "    The gradient is divided by batch_size so the step is per-example.\n",
    "    \"\"\"\n",
    "    for param in params:\n",
    "        # In-place slice assignment keeps the attached grad buffer intact.\n",
    "        param[:] = param - lr * param.grad / batch_size\n",
    "\n",
    "def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n",
    "              params=None, lr=None):\n",
    "    \"\"\"Train `net` with mini-batch SGD, printing the mean loss each epoch.\n",
    "\n",
    "    NOTE(review): train_iter/test_iter are accepted for signature\n",
    "    compatibility but ignored — a fresh iterator over the globals\n",
    "    x/x_label is built every epoch (a passed-in generator would be\n",
    "    exhausted after the first epoch).\n",
    "    \"\"\"\n",
    "    for epoch in range(num_epochs):\n",
    "        for X, y in data_iter(batch_size, x, x_label):\n",
    "            with autograd.record():  # record ops for autodiff\n",
    "                l = loss(net(X), y)\n",
    "            l.backward()                 # compute gradients\n",
    "            sgd(params, lr, batch_size)  # update the weights\n",
    "        # Elementwise loss over the full training set after this epoch.\n",
    "        epoch_loss = loss(net(x), x_label)\n",
    "        print('epoch %d,loss %f' % (epoch + 1, epoch_loss.mean().asnumpy()))\n",
    "\n",
    "\n",
    "# Data files (normalised features / labels / prediction inputs).\n",
    "pathX = '标准化_272.xls'  # the .xls file lives in the current folder\n",
    "pathX2 = '标准化_272label.xls'  # the .xls file lives in the current folder\n",
    "pathX3 = '标准化_272pre.xls'  # the .xls file lives in the current folder\n",
    "x = excel2matrix(pathX)\n",
    "x_label=excel2matrix(pathX2)\n",
    "y_test=excel2matrix(pathX3)\n",
    "y_label=nd.zeros((y_test.shape[0],1))  # placeholder labels for the test iterator\n",
    "\n",
    "# NOTE(review): both iterators are passed to train_ch3 but ignored by it.\n",
    "train_iter=data_iter(batch_size,x,x_label)\n",
    "test_iter=data_iter(batch_size,y_test,y_label)\n",
    "train_ch3(net, train_iter, test_iter, squared_loss, num_epochs, batch_size,params, lr)\n",
    "#set_figsize()\n",
    "#plt.scatter(x[:, 1].asnumpy(), x_label[:, 0].asnumpy(), 1);  # trailing semicolon suppresses the repr, showing only the figure\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "7001e715",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[\n",
      "[[ -0.36773023  -0.7293374    0.41152233  -0.5144523   -0.58742744\n",
      "    8.368031    -3.546355    -6.094584    -2.763431     4.9860725\n",
      "    3.0229697   -1.8874447   -2.0963557   -0.15811183   0.12214036\n",
      "   -3.5092335    0.45851114  -4.728452     1.64679     -0.84484416]\n",
      " [  3.3752275   -3.3886585   -0.03930783  -4.6160784   -4.4326067\n",
      "    3.4558437   -8.498186    13.875785     9.126509     4.7032456\n",
      "    1.1868109   17.613836    -3.1671627   -0.03256728   0.02874785\n",
      "    5.270388    -0.08126932 -14.342475    -3.93496     -1.8244916 ]\n",
      " [  0.23426081  -0.47512996  -0.81406915  -1.0725749   -1.768299\n",
      "    0.45911574  -0.8501322   -0.98802346   1.7927341   -1.980763\n",
      "    0.62824297  -0.01933188   0.3432565   -2.307829     1.6547658\n",
      "    0.63119054   0.8539068    0.09533978   1.9006089    0.835268  ]\n",
      " [  0.96729714   0.03187471   0.3876903    0.46961153  -0.2597674\n",
      "   -7.0020843    2.322877     7.3219414    6.2411604   -0.64617616\n",
      "   -3.6120398    4.6562448   -1.3287314   -0.24446206  -0.14642967\n",
      "    3.595898     0.2611595   -1.3066992   -1.4258983   -0.43782878]]\n",
      "<NDArray 4x20 @cpu(0)>, \n",
      "[  0.9873509    1.1731526   -0.09824429   1.6095172   -7.3304057\n",
      "   7.734953   -12.424595    -2.965206    -2.2015464   -7.3355594\n",
      "   2.3882334    5.591913     1.1375694   -0.06319674  -0.42115426\n",
      "  16.206554    -0.46068153   4.693003    -0.5194534    0.597637  ]\n",
      "<NDArray 20 @cpu(0)>]\n"
     ]
    }
   ],
   "source": [
    "# Inspect the trained first-layer parameters, then run inference on a\n",
    "# held-out prediction file with the trained net.\n",
    "print([w,b])\n",
    "pathX4 = '标准化_272pre_304.xls'  # the .xls file lives in the current folder\n",
    "y_test1=excel2matrix(pathX4)\n",
    "a=net(y_test1)  # predictions; written to disk in the next cell\n",
    "\n",
    "set_figsize()\n",
    "#plt.scatter(x[:, 1].asnumpy(), x_label[:, 0].asnumpy(), 1);  # trailing semicolon suppresses the repr, showing only the figure"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "77210598",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[385.45468]\n",
      " [445.71713]\n",
      " [532.7162 ]\n",
      " ...\n",
      " [493.95724]\n",
      " [485.0862 ]\n",
      " [493.95724]]\n"
     ]
    }
   ],
   "source": [
    "# Convert the predictions to NumPy and persist them to disk.\n",
    "import numpy as np\n",
    "a1=a.asnumpy()\n",
    "print(a1)\n",
    "# NOTE(review): fmt='%d' truncates the float predictions to integers in\n",
    "# result.txt — confirm integer output is intended.\n",
    "np.savetxt(\"./result.txt\",a1,fmt='%d')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d602ff85",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:gluon] *",
   "language": "python",
   "name": "conda-env-gluon-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
