{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "cb9e3f4a",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1000, 4)\n"
     ]
    }
   ],
   "source": [
    "import d2lzh as d2l\n",
    "import xlrd\n",
    "import math\n",
    "from mxnet import autograd, nd\n",
    "from mxnet.gluon import loss as gloss\n",
    "import time\n",
    "#读取数据集\n",
    "# (corpus_indices, char_to_idx, idx_to_char,\n",
    "#  vocab_size) = d2l.load_data_jay_lyrics()\n",
    "def excel2matrix(path):\n",
    "    \"\"\"Load the first worksheet of an .xls file into an NDArray.\n",
    "\n",
    "    Each spreadsheet row becomes one matrix row; the result has shape\n",
    "    (nrows, ncols) with mxnet's default float32 dtype.\n",
    "    \"\"\"\n",
    "    workbook = xlrd.open_workbook(path)\n",
    "    sheet = workbook.sheets()[0]\n",
    "    # Gather every row's cell values first, then convert in one call\n",
    "    # instead of pre-allocating a random matrix and overwriting it.\n",
    "    row_values = [sheet.row_values(r) for r in range(sheet.nrows)]\n",
    "    return nd.array(row_values)\n",
    "          \n",
    "pathX = '标准化_300rnn.xls'  #  113.xlsx 在当前文件夹下\n",
    "y = excel2matrix(pathX)\n",
    "print(y.shape)\n",
    "corpus_indices=list(range(0,y.shape[0]))\n",
    "\n",
    "cout=[]\n",
    "\n",
    "#输入   隐藏层  输\n",
    "num_inputs, num_hiddens, num_outputs = 3, 100, 1\n",
    "#gpu类型\n",
    "ctx = d2l.try_gpu()\n",
    "def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None): \n",
    "    \"\"\"Yield (X, Y) minibatches of index sequences for RNN training.\n",
    "\n",
    "    X has shape (batch_size, num_steps); Y is the same slice shifted one\n",
    "    position ahead, so Y holds the next-step targets for X.\n",
    "    NOTE(review): despite its name this iterates consecutively without\n",
    "    shuffling (like d2l.data_iter_consecutive) -- confirm that random\n",
    "    sampling was not intended.\n",
    "    \"\"\"\n",
    "    corpus_indices = nd.array(corpus_indices, ctx=ctx)\n",
    "    data_len = len(corpus_indices)\n",
    "    batch_len = data_len // batch_size\n",
    "    # Lay the corpus out as batch_size parallel streams of batch_len steps.\n",
    "    indices = corpus_indices[0: batch_size*batch_len].reshape((\n",
    "        batch_size, batch_len))\n",
    "    epoch_size = (batch_len - 1) // num_steps\n",
    "    for i in range(epoch_size):\n",
    "        i = i * num_steps\n",
    "        X = indices[:, i: i + num_steps]\n",
    "        Y = indices[:, i + 1: i + num_steps + 1]\n",
    "        yield X, Y\n",
    "#     # 减1是因为输出的索引是相应输入的索引加1\n",
    "#     num_examples = (len(corpus_indices) - 1) // num_steps  #x步多少个数据  4\n",
    "#     epoch_size = num_examples // batch_size   #多少批次      2\n",
    "#     example_indices = list(range(num_examples))#  所有x步以后数据的序列 4\n",
    "#     random.shuffle(example_indices)\n",
    "\n",
    "#     # 返回从pos开始的长为num_steps的序列\n",
    "#     def _data(pos):\n",
    "#         return corpus_indices[pos: pos + num_steps]\n",
    "\n",
    "#     for i in range(epoch_size): #多少批\n",
    "#         # 每次读取batch_size个随机样本\n",
    "#         i = i * batch_size #批量大小  \n",
    "#         batch_indices = example_indices[i: i + batch_size] \n",
    "#         X = [_data(j * num_steps) for j in batch_indices]\n",
    "#         Y = [_data(j * num_steps + 1) for j in batch_indices]\n",
    "\n",
    "#         yield nd.array(X, ctx), nd.array(Y, ctx)\n",
    "\n",
    "     \n",
    "# Parameter initialization\n",
    "def get_params():\n",
    "    \"\"\"Create and return the RNN parameters with gradient buffers attached.\n",
    "\n",
    "    Uses the module-level num_inputs / num_hiddens / num_outputs and ctx.\n",
    "    \"\"\"\n",
    "    # Gaussian (std 1) initializer for weight matrices\n",
    "    def _one(shape):\n",
    "        return nd.random.normal(scale=1, shape=shape, ctx=ctx)\n",
    "\n",
    "    # Hidden-layer parameters\n",
    "    W_xh = _one((num_inputs, num_hiddens))\n",
    "    W_hh = _one((num_hiddens, num_hiddens))\n",
    "    b_h = nd.zeros(num_hiddens, ctx=ctx)\n",
    "    # Output-layer parameters\n",
    "    W_hq = _one((num_hiddens, num_outputs))\n",
    "    b_q = nd.zeros(num_outputs, ctx=ctx)\n",
    "    # Attach gradient buffers so autograd can record into them\n",
    "    params = [W_xh, W_hh, b_h, W_hq, b_q]\n",
    "    for param in params:\n",
    "        param.attach_grad()\n",
    "    return params\n",
    "# Initial hidden state of the network\n",
    "def init_rnn_state(batch_size, num_hiddens, ctx):\n",
    "    \"\"\"Return the initial state: a 1-tuple holding zeros of (batch, hidden).\"\"\"\n",
    "    return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx), )\n",
    "\n",
    "def rnn(inputs, state, params):\n",
    "    \"\"\"Run a vanilla tanh RNN over `inputs` and return (outputs, state).\n",
    "\n",
    "    inputs and outputs are both lists of num_steps matrices, one per step.\n",
    "    \"\"\"\n",
    "    W_xh, W_hh, b_h, W_hq, b_q = params\n",
    "    H, = state\n",
    "    outputs = []\n",
    "    for X in inputs:\n",
    "        # New hidden state from the current input and the previous state\n",
    "        H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)\n",
    "        # Linear readout for this time step\n",
    "        Y = nd.dot(H, W_hq) + b_q\n",
    "        outputs.append(Y)\n",
    "    return outputs, (H,)\n",
    "\n",
    "# state = init_rnn_state(X.shape[0], num_hiddens, ctx)\n",
    "# inputs = to_onehot(X.as_in_context(ctx), vocab_size)\n",
    "# params = get_params()\n",
    "# outputs, state_new = rnn(inputs, state, params)\n",
    "# len(outputs), outputs[0].shape, state_new[0].shape\n",
    "# Adapted from the d2lzh package version of predict_rnn.\n",
    "def predict_rnn(prefix, rnn, params, init_rnn_state,\n",
    "                num_hiddens,  ctx):\n",
    "    \"\"\"Feed each row of `prefix` through the RNN (batch size 1) and\n",
    "    collect the per-step outputs in a list.\n",
    "\n",
    "    NOTE(review): assumes each prefix row matches the num_inputs feature\n",
    "    width expected by W_xh -- confirm against the prediction spreadsheet.\n",
    "    \"\"\"\n",
    "    \n",
    "    state = init_rnn_state(1, num_hiddens, ctx)\n",
    "    #print(prefix.)\n",
    "    output = prefix\n",
    "\n",
    "    outt=[]\n",
    "    for t in range(0,prefix.shape[0]):\n",
    "        # Use row t of the prefix as the input for this time step\n",
    "        \n",
    "        X = [output[t,:]]\n",
    "#         print('X')\n",
    "#         print(X)\n",
    "        # Compute the output and update the hidden state\n",
    "        (Y, state) = rnn(X, state, params)\n",
    "        # (Unlike the original d2l version, the true prefix row is always\n",
    "        # fed in; the previous prediction is never reused as input.)\n",
    "        \n",
    "        outt.append(Y)\n",
    "    #print(outt)\n",
    "    return outt\n",
    "# Adapted from the d2lzh package version of grad_clipping.\n",
    "def grad_clipping(params, theta, ctx):\n",
    "    \"\"\"Clip all parameter gradients jointly to global L2 norm `theta`.\"\"\"\n",
    "    norm = nd.array([0], ctx)\n",
    "    for param in params:\n",
    "        norm += (param.grad ** 2).sum()\n",
    "    norm = norm.sqrt().asscalar()\n",
    "    if norm > theta:\n",
    "        # Scale every gradient in place so the global norm equals theta\n",
    "        for param in params:\n",
    "            param.grad[:] *= theta / norm\n",
    "            \n",
    "def to_onehotx(X, size):  # replaces d2l.to_onehot for this dataset\n",
    "    \"\"\"Map a (batch, num_steps) index minibatch to a list of num_steps\n",
    "    input matrices taken from columns 0:3 of the global data matrix y.\n",
    "\n",
    "    NOTE(review): `size` is unused, and the feature columns are hardcoded\n",
    "    to match num_inputs = 3.\n",
    "    \"\"\"\n",
    "    q=[]\n",
    "    for z in X.T:\n",
    "        op=[]\n",
    "        for g in z:\n",
    "            # g indexes a row of the global matrix y loaded above\n",
    "            op.append(y[g,0:3])\n",
    "        \n",
    "        q.append(nd.concat(*op,dim=0))\n",
    "    return q \n",
    "\n",
    "def to_onehoty(Y, size):  # replaces d2l.to_onehot for this dataset\n",
    "    \"\"\"Map a (batch, num_steps) index minibatch to a list of num_steps\n",
    "    target arrays taken from column 3 of the global data matrix y.\n",
    "\n",
    "    NOTE(review): `size` is unused; the target column is hardcoded.\n",
    "    \"\"\"\n",
    "    q=[]\n",
    "    for z in Y.T:\n",
    "        op=[]\n",
    "        for g in z:\n",
    "            # g indexes a row of the global matrix y loaded above\n",
    "            op.append(y[g,3])\n",
    "        \n",
    "        q.append(nd.concat(*op,dim=0))\n",
    "    return q      \n",
    "\n",
    "# Adapted from the d2lzh package version of train_and_predict_rnn.\n",
    "def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                          ctx, corpus_indices,  is_random_iter, num_epochs, num_steps,lr, clipping_theta, batch_size,\n",
    "                           prefixes):\n",
    "    \"\"\"Train the RNN with manually managed parameters and report loss.\n",
    "\n",
    "    Every 3 epochs the mean L2 loss over the epoch is printed and\n",
    "    predict_rnn is run on `prefixes` (its return value is discarded).\n",
    "    NOTE(review): this uses the module-level `params` created in the\n",
    "    calling cell rather than calling get_params() itself, so the trained\n",
    "    weights stay visible to the later prediction cells.\n",
    "    \"\"\"\n",
    "    if is_random_iter:\n",
    "        data_iter_fn = data_iter_random\n",
    "    else:\n",
    "        data_iter_fn = d2l.data_iter_consecutive\n",
    "    loss = gloss.L2Loss()\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        if not is_random_iter:  # consecutive sampling: one init per epoch\n",
    "            state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
    "        l_sum, n, start = 0.0, 0, time.time()\n",
    "        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)\n",
    "        for X, Y in data_iter:\n",
    "            if is_random_iter:  # random sampling: re-init per minibatch\n",
    "                state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
    "            else:\n",
    "                # BUGFIX: NDArray.detach() is not in-place; rebind the\n",
    "                # state so the graph is actually cut between minibatches.\n",
    "                state = tuple(s.detach() for s in state)\n",
    "            with autograd.record():\n",
    "                inputs = to_onehotx(X, 5)\n",
    "                # outputs: num_steps matrices of shape (batch_size, num_outputs)\n",
    "                (outputs, state) = rnn(inputs, state, params)\n",
    "                # (num_steps * batch_size, num_outputs) after concatenation\n",
    "                outputs = nd.concat(*outputs, dim=0)\n",
    "                # Targets laid out step-major so they line up with outputs;\n",
    "                # renamed from `y` to avoid shadowing the global data matrix.\n",
    "                targets = nd.concat(*to_onehoty(Y, 5), dim=0)\n",
    "                l = loss(outputs, targets).mean()\n",
    "            l.backward()\n",
    "            grad_clipping(params, clipping_theta, ctx)  # clip gradients\n",
    "            d2l.sgd(params, lr, 1)  # loss already averaged; no extra scaling\n",
    "            # BUGFIX: accumulate the epoch loss; the old code reset LOSS on\n",
    "            # every minibatch and printed only the last batch's value.\n",
    "            l_sum += l.asscalar()\n",
    "            n += 1\n",
    "        if (epoch + 1) % 3 == 0:\n",
    "            print('epoch %d, time %.2f sec, loss %f' % (\n",
    "                epoch + 1, time.time() - start, l_sum / max(n, 1)))\n",
    "            predict_rnn(prefixes, rnn, params, init_rnn_state,\n",
    "                        num_hiddens, ctx)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "7001e715",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 3, time 5.95 sec, loss \n",
      "[268.25134]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 6, time 5.57 sec, loss \n",
      "[144.77704]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 9, time 5.66 sec, loss \n",
      "[564.5572]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 12, time 5.90 sec, loss \n",
      "[567.4597]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 15, time 5.56 sec, loss \n",
      "[210.45886]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 18, time 5.64 sec, loss \n",
      "[353.06717]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 21, time 5.64 sec, loss \n",
      "[462.77817]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 24, time 5.57 sec, loss \n",
      "[1234.6029]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 27, time 5.65 sec, loss \n",
      "[365.5474]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 30, time 5.65 sec, loss \n",
      "[1196.1575]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 33, time 5.62 sec, loss \n",
      "[413.13574]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 36, time 5.60 sec, loss \n",
      "[792.6958]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 39, time 5.67 sec, loss \n",
      "[242.96152]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 42, time 5.58 sec, loss \n",
      "[377.33298]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 45, time 5.64 sec, loss \n",
      "[411.97794]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 48, time 5.64 sec, loss \n",
      "[882.4457]\n",
      "<NDArray 1 @cpu(0)>\n"
     ]
    }
   ],
   "source": [
    "pathX1 = '标准化_300rnnpre.xls'  # prediction spreadsheet in the current folder\n",
    "prefixes1 = excel2matrix(pathX1)\n",
    "\n",
    "# NOTE(review): defined but never used below (and it slices columns 0:2,\n",
    "# unlike to_onehotx's 0:3) -- candidate for removal.\n",
    "def to_onehotp(Y, size):  # adapted from the d2lzh to_onehot helper\n",
    "    q=[]\n",
    "    for z in Y.T:\n",
    "        op=[]\n",
    "        for g in z:\n",
    "            op.append(y[g,0:2])\n",
    "        \n",
    "        q.append(nd.concat(*op,dim=0))\n",
    "    return q \n",
    "\n",
    "# prefixes2 / prefixes are plain aliases of prefixes1 (no copies made)\n",
    "prefixes2=prefixes1\n",
    "\n",
    "precorpus_indices=list(range(0,prefixes2.shape[0]))\n",
    "\n",
    "prefixes=prefixes2\n",
    "# print('prefixes')\n",
    "# print(prefixes)\n",
    "params = get_params()\n",
    "\n",
    "# NOTE(review): lr = 1e3 is unusually large -- training relies on\n",
    "# grad_clipping with theta = 1e-2 to keep updates bounded; confirm.\n",
    "num_epochs, num_steps, batch_size, lr, clipping_theta = 50, 2, 5, 1e3, 1e-2\n",
    "\n",
    "\n",
    "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                       ctx, corpus_indices, True, num_epochs, num_steps, lr,\n",
    "                      clipping_theta, batch_size, \n",
    "                      prefixes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "83d894b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the trained RNN over the prediction prefixes and keep the outputs.\n",
    "# NOTE(review): re-assigning num_inputs/num_hiddens/num_outputs here is\n",
    "# redundant -- they already hold these values from the first cell.\n",
    "num_inputs, num_hiddens, num_outputs = 3, 100, 1\n",
    "cout=predict_rnn(prefixes, rnn, params, init_rnn_state,\n",
    "                                     num_hiddens,  ctx)\n",
    "\n",
    "#params = [W_xh, W_hh, b_h, W_hq, b_q]\n",
    "# state = init_rnn_state(1, num_hiddens, ctx)\n",
    "# output1 = prefixes\n",
    "\n",
    "# outt=[]\n",
    "# for t in range(0,199):\n",
    "#     X = [output1[t,:]]\n",
    "# #     print(X)\n",
    "# #     print(params[0])\n",
    "#     (Y, state) = rnn(X, state, params)\n",
    "#     outt.append(Y)\n",
    "# print(outt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "5740ab89",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 62.475945  90.09029  -28.215952 ... 273.58966  274.83228  224.48975 ]\n"
     ]
    }
   ],
   "source": [
    "# Flatten the nested prediction outputs into a flat list of scalars.\n",
    "opp=[]\n",
    "for a in cout:\n",
    "    for c1 in a:\n",
    "        for c2 in c1:\n",
    "            opp.append(c2[0].asscalar())\n",
    "        \n",
    "\n",
    "import numpy as np\n",
    "a1=nd.array(opp)\n",
    "a2=a1.asnumpy()\n",
    "print(a2)\n",
    "# NOTE(review): fmt='%d' truncates the float predictions to integers on\n",
    "# disk -- use a float format (e.g. '%.6f') if precision matters.\n",
    "np.savetxt(\"./result.txt\",a2,fmt='%d')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6361ddd",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:gluon] *",
   "language": "python",
   "name": "conda-env-gluon-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
