{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 287,
   "id": "cb9e3f4a",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1800, 5)\n"
     ]
    }
   ],
   "source": [
    "import d2lzh as d2l\n",
    "import math\n",
    "from mxnet import autograd, nd\n",
    "from mxnet.gluon import loss as gloss\n",
    "import time\n",
    "# Load the dataset (the original d2l lyrics loader is kept for reference).\n",
    "# (corpus_indices, char_to_idx, idx_to_char,\n",
    "#  vocab_size) = d2l.load_data_jay_lyrics()\n",
    "\n",
    "pathX = '272rnn.xls'  # spreadsheet expected in the current working directory\n",
    "# NOTE(review): excel2matrix is not defined anywhere in this notebook -- it\n",
    "# must come from an earlier session or an external module; confirm before\n",
    "# re-running from a fresh kernel.\n",
    "y = excel2matrix(pathX)  # full data matrix; printed shape was (1800, 5)\n",
    "print(y.shape)\n",
    "# Row indices into y; used as the training corpus by the iterators below.\n",
    "corpus_indices=list(range(0,y.shape[0]))\n",
    "\n",
    "cout=[]  # collects prediction outputs in a later cell\n",
    "\n",
    "# Model sizes: input features, hidden units, outputs.\n",
    "num_inputs, num_hiddens, num_outputs = 4, 100, 1\n",
    "# Device context (GPU if available, else CPU).\n",
    "ctx = d2l.try_gpu()\n",
    "def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None): \n",
    "    # NOTE(review): despite its name this yields *consecutive* (adjacent)\n",
    "    # minibatches, like d2l.data_iter_consecutive; the genuinely random\n",
    "    # version is the commented-out code kept below for reference.\n",
    "    corpus_indices = nd.array(corpus_indices, ctx=ctx)\n",
    "    data_len = len(corpus_indices)\n",
    "    batch_len = data_len // batch_size\n",
    "    # Lay the corpus out as batch_size rows of consecutive indices; any\n",
    "    # trailing remainder that does not fill a full row is dropped.\n",
    "    indices = corpus_indices[0: batch_size*batch_len].reshape((\n",
    "        batch_size, batch_len))\n",
    "    epoch_size = (batch_len - 1) // num_steps\n",
    "    for i in range(epoch_size):\n",
    "        i = i * num_steps\n",
    "        X = indices[:, i: i + num_steps]\n",
    "        # Targets are the same indices shifted forward by one position.\n",
    "        Y = indices[:, i + 1: i + num_steps + 1]\n",
    "        yield X, Y\n",
    "#     # Random-sampling variant (dead code, kept for reference):\n",
    "#     # subtract 1 because each output index is its input index plus 1\n",
    "#     num_examples = (len(corpus_indices) - 1) // num_steps  # samples of length num_steps\n",
    "#     epoch_size = num_examples // batch_size   # number of minibatches\n",
    "#     example_indices = list(range(num_examples))  # sample start positions\n",
    "#     random.shuffle(example_indices)\n",
    "\n",
    "#     # return the length-num_steps sequence starting at pos\n",
    "#     def _data(pos):\n",
    "#         return corpus_indices[pos: pos + num_steps]\n",
    "\n",
    "#     for i in range(epoch_size):  # one iteration per minibatch\n",
    "#         # read batch_size random samples each time\n",
    "#         i = i * batch_size  # batch size\n",
    "#         batch_indices = example_indices[i: i + batch_size] \n",
    "#         X = [_data(j * num_steps) for j in batch_indices]\n",
    "#         Y = [_data(j * num_steps + 1) for j in batch_indices]\n",
    "\n",
    "#         yield nd.array(X, ctx), nd.array(Y, ctx)\n",
    "         \n",
    "     \n",
    "# Parameter initialisation.\n",
    "def get_params():\n",
    "    \"\"\"Create and return [W_xh, W_hh, b_h, W_hq, b_q] with gradients attached.\n",
    "\n",
    "    Weights are drawn from N(0, 1); biases start at zero. Relies on the\n",
    "    module-level num_inputs, num_hiddens, num_outputs and ctx.\n",
    "    \"\"\"\n",
    "    def _normal(shape):\n",
    "        # Gaussian-initialised tensor on the configured device.\n",
    "        return nd.random.normal(scale=1, shape=shape, ctx=ctx)\n",
    "\n",
    "    params = [\n",
    "        _normal((num_inputs, num_hiddens)),   # W_xh: input  -> hidden\n",
    "        _normal((num_hiddens, num_hiddens)),  # W_hh: hidden -> hidden\n",
    "        nd.zeros(num_hiddens, ctx=ctx),       # b_h\n",
    "        _normal((num_hiddens, num_outputs)),  # W_hq: hidden -> output\n",
    "        nd.zeros(num_outputs, ctx=ctx),       # b_q\n",
    "    ]\n",
    "    for p in params:\n",
    "        p.attach_grad()\n",
    "    return params\n",
    "# Initial hidden state of the network.\n",
    "def init_rnn_state(batch_size, num_hiddens, ctx):\n",
    "    \"\"\"Return the initial state: a 1-tuple holding a zero (batch, hidden) array.\"\"\"\n",
    "    initial = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)\n",
    "    return (initial,)\n",
    "\n",
    "def rnn(inputs, state, params):\n",
    "    \"\"\"Run a vanilla tanh RNN over the given time steps.\n",
    "\n",
    "    inputs : iterable of per-step input matrices.\n",
    "    state  : 1-tuple holding the hidden state H.\n",
    "    params : [W_xh, W_hh, b_h, W_hq, b_q].\n",
    "    Returns (list of per-step outputs, new state tuple).\n",
    "    \"\"\"\n",
    "    W_xh, W_hh, b_h, W_hq, b_q = params\n",
    "    (hidden,) = state\n",
    "    step_outputs = []\n",
    "    for step_x in inputs:\n",
    "        # Hidden-state update, then a linear readout for this step.\n",
    "        hidden = nd.tanh(nd.dot(step_x, W_xh) + nd.dot(hidden, W_hh) + b_h)\n",
    "        step_outputs.append(nd.dot(hidden, W_hq) + b_q)\n",
    "    return step_outputs, (hidden,)\n",
    "\n",
    "# Feed each row of `prefix` through the RNN and collect the per-step outputs.\n",
    "def predict_rnn(prefix, rnn, params, init_rnn_state,\n",
    "                num_hiddens,  ctx):\n",
    "    \"\"\"Run the RNN over every row of `prefix`, one row per time step.\n",
    "\n",
    "    Returns a list with one entry per step; each entry is the list of output\n",
    "    arrays produced by rnn() for that single-step input.\n",
    "    \"\"\"\n",
    "    # Batch size 1: a single sequence is predicted.\n",
    "    state = init_rnn_state(1, num_hiddens, ctx)\n",
    "    predictions = []\n",
    "    for step in range(0, prefix.shape[0]):\n",
    "        # One-element list holding the current input row (feature vector).\n",
    "        step_input = [prefix[step, :]]\n",
    "        # Advance the hidden state and record this step's output list.\n",
    "        step_output, state = rnn(step_input, state, params)\n",
    "        predictions.append(step_output)\n",
    "    return predictions\n",
    "# Gradient clipping by global L2 norm (same routine as in the d2lzh package).\n",
    "def grad_clipping(params, theta, ctx):\n",
    "    \"\"\"Scale all gradients in place so their global L2 norm is at most theta.\"\"\"\n",
    "    total = nd.array([0], ctx)\n",
    "    for p in params:\n",
    "        total += (p.grad ** 2).sum()\n",
    "    total = total.sqrt().asscalar()\n",
    "    if total > theta:\n",
    "        scale = theta / total\n",
    "        for p in params:\n",
    "            p.grad[:] *= scale\n",
    "            \n",
    "def to_onehotx(X, size):\n",
    "    \"\"\"Map an index batch X to per-step input feature matrices.\n",
    "\n",
    "    Despite the name this is not a one-hot encoding: each index selects the\n",
    "    first four columns (the input features) of the module-level data matrix\n",
    "    `y`. One concatenated matrix is produced per time step (per column of X);\n",
    "    `size` is unused. NOTE(review): reads the global `y` -- confirm it is\n",
    "    loaded before calling.\n",
    "    \"\"\"\n",
    "    steps = []\n",
    "    for column in X.T:\n",
    "        rows = [y[idx, 0:4] for idx in column]\n",
    "        steps.append(nd.concat(*rows, dim=0))\n",
    "    return steps\n",
    "\n",
    "def to_onehoty(Y, size):\n",
    "    \"\"\"Map an index batch Y to per-step target vectors.\n",
    "\n",
    "    Each index selects column 4 (the target value) of the module-level data\n",
    "    matrix `y`; one concatenated vector per time step (per column of Y).\n",
    "    `size` is unused. NOTE(review): reads the global `y`.\n",
    "    \"\"\"\n",
    "    steps = []\n",
    "    for column in Y.T:\n",
    "        targets = [y[idx, 4] for idx in column]\n",
    "        steps.append(nd.concat(*targets, dim=0))\n",
    "    return steps\n",
    "# Training loop (adapted from the d2lzh package).\n",
    "def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                          ctx, corpus_indices,  is_random_iter, num_epochs, num_steps,lr, clipping_theta, batch_size,\n",
    "                           prefixes):\n",
    "    \"\"\"Train the RNN with SGD and gradient clipping, printing the summed\n",
    "    epoch loss every second epoch and running predict_rnn on `prefixes`.\n",
    "\n",
    "    NOTE(review): the `get_params` argument is never called -- training\n",
    "    updates the module-level `params` in place, which later cells rely on,\n",
    "    so this must not be \"fixed\" by creating local parameters here.\n",
    "    \"\"\"\n",
    "    if is_random_iter:\n",
    "        data_iter_fn = data_iter_random\n",
    "    else:\n",
    "        data_iter_fn = d2l.data_iter_consecutive\n",
    "\n",
    "    # Squared (L2) regression loss, since the target is a real value.\n",
    "    loss = gloss.L2Loss()\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        if not is_random_iter:\n",
    "            # Consecutive sampling: initialise the hidden state once per epoch.\n",
    "            state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
    "\n",
    "        # Accumulate the epoch loss over all minibatches. (Bug fix: this was\n",
    "        # previously reset inside the batch loop, so only the last batch's\n",
    "        # loss was ever reported.)\n",
    "        LOSS = 0\n",
    "        start = time.time()\n",
    "        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)\n",
    "        for X, Y in data_iter:\n",
    "            if is_random_iter:\n",
    "                # Random sampling: fresh hidden state for every minibatch.\n",
    "                state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
    "            else:\n",
    "                # Detach the state from the previous iteration's graph.\n",
    "                # (Bug fix: NDArray.detach() returns a new array, so the old\n",
    "                # in-place-style call `s.detach()` had no effect -- rebind.)\n",
    "                state = tuple(s.detach() for s in state)\n",
    "            with autograd.record():\n",
    "                # Per-step input matrices looked up from the data table.\n",
    "                inputs = to_onehotx(X, 5)\n",
    "                # outputs: num_steps matrices of shape (batch_size, num_outputs).\n",
    "                (outputs, state) = rnn(inputs, state, params)\n",
    "                # Stack to (num_steps * batch_size, num_outputs).\n",
    "                outputs = nd.concat(*outputs, dim=0)\n",
    "                # Targets, transposed and flattened to line up with the output\n",
    "                # rows. (Renamed from `y` to avoid shadowing the global data\n",
    "                # matrix of the same name.)\n",
    "                y_targets = nd.concat(*to_onehoty(Y, 5), dim=0)\n",
    "                l = loss(outputs, y_targets).mean()\n",
    "                LOSS = LOSS + l\n",
    "            l.backward()\n",
    "            grad_clipping(params, clipping_theta, ctx)  # clip gradients\n",
    "            d2l.sgd(params, lr, 1)  # loss is already a mean; no extra averaging\n",
    "        if (epoch + 1) % 2 == 0:\n",
    "            print('epoch %d, time %.2f sec, loss' % (\n",
    "                epoch + 1, time.time() - start), LOSS)\n",
    "            # Result is discarded; kept to mirror the original behaviour.\n",
    "            predict_rnn(prefixes, rnn, params, init_rnn_state,\n",
    "                        num_hiddens,  ctx, )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 292,
   "id": "7001e715",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 2, time 4.32 sec, loss \n",
      "[257533.08]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 4, time 4.26 sec, loss \n",
      "[219567.81]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 6, time 4.24 sec, loss \n",
      "[173511.14]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 8, time 4.34 sec, loss \n",
      "[125639.1]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 10, time 4.26 sec, loss \n",
      "[83601.734]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 12, time 4.59 sec, loss \n",
      "[53354.105]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 14, time 4.30 sec, loss \n",
      "[37518.613]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 16, time 4.55 sec, loss \n",
      "[31473.936]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 18, time 4.34 sec, loss \n",
      "[28518.02]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 20, time 4.47 sec, loss \n",
      "[26707.969]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 22, time 4.36 sec, loss \n",
      "[25973.574]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 24, time 4.26 sec, loss \n",
      "[25173.365]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 26, time 4.26 sec, loss \n",
      "[24422.201]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 28, time 4.28 sec, loss \n",
      "[23708.713]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 30, time 4.31 sec, loss \n",
      "[23006.348]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 32, time 4.27 sec, loss \n",
      "[22470.885]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 34, time 4.65 sec, loss \n",
      "[22084.916]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 36, time 4.33 sec, loss \n",
      "[21852.846]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 38, time 4.29 sec, loss \n",
      "[21645.527]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 40, time 4.23 sec, loss \n",
      "[21515.666]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 42, time 4.28 sec, loss \n",
      "[21405.512]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 44, time 4.27 sec, loss \n",
      "[21381.238]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 46, time 4.30 sec, loss \n",
      "[21285.21]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 48, time 4.29 sec, loss \n",
      "[21162.264]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 50, time 4.27 sec, loss \n",
      "[21097.008]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 52, time 4.29 sec, loss \n",
      "[20888.543]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 54, time 4.31 sec, loss \n",
      "[20713.67]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 56, time 4.26 sec, loss \n",
      "[20724.992]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 58, time 4.38 sec, loss \n",
      "[20482.572]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 60, time 4.73 sec, loss \n",
      "[20328.156]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 62, time 4.27 sec, loss \n",
      "[20159.562]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 64, time 4.26 sec, loss \n",
      "[20006.47]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 66, time 4.29 sec, loss \n",
      "[19884.709]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 68, time 4.26 sec, loss \n",
      "[19667.137]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 70, time 4.24 sec, loss \n",
      "[19449.166]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 72, time 4.27 sec, loss \n",
      "[19245.125]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 74, time 4.26 sec, loss \n",
      "[19090.105]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 76, time 4.42 sec, loss \n",
      "[19107.926]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 78, time 4.36 sec, loss \n",
      "[19093.69]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 80, time 4.57 sec, loss \n",
      "[18902.854]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 82, time 4.25 sec, loss \n",
      "[18740.287]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 84, time 4.27 sec, loss \n",
      "[18678.697]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 86, time 4.24 sec, loss \n",
      "[18544.998]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 88, time 4.27 sec, loss \n",
      "[18468.322]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 90, time 4.33 sec, loss \n",
      "[18339.281]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 92, time 4.22 sec, loss \n",
      "[18255.67]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 94, time 4.30 sec, loss \n",
      "[18164.072]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 96, time 4.28 sec, loss \n",
      "[18092.773]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 98, time 4.26 sec, loss \n",
      "[18003.824]\n",
      "<NDArray 1 @cpu(0)>\n",
      "epoch 100, time 4.43 sec, loss \n",
      "[17930.527]\n",
      "<NDArray 1 @cpu(0)>\n"
     ]
    }
   ],
   "source": [
    "pathX1 = '272rnnpre.xls'  # prediction-input spreadsheet in the current directory\n",
    "# NOTE(review): excel2matrix is not defined in this notebook; it must come\n",
    "# from an earlier session or an external module.\n",
    "prefixes1 = excel2matrix(pathX1)\n",
    "\n",
    "def to_onehotp(Y, size):  # NOTE(review): exact duplicate of to_onehotx; appears unused\n",
    "    # For every index in Y, look up the first four feature columns of the\n",
    "    # global data matrix `y`; one concatenated matrix per column of Y.\n",
    "    q=[]\n",
    "    for z in Y.T:\n",
    "        op=[]\n",
    "        for g in z:\n",
    "            op.append(y[g,0:4])\n",
    "        \n",
    "        q.append(nd.concat(*op,dim=0))\n",
    "    return q \n",
    "\n",
    "prefixes2=prefixes1  # NOTE(review): redundant aliases; kept as-is\n",
    "\n",
    "# Index list for the prediction data (unused below -- NOTE(review)).\n",
    "precorpus_indices=list(range(0,prefixes2.shape[0]))\n",
    "\n",
    "prefixes=prefixes2\n",
    "# print('prefixes')\n",
    "# print(prefixes)\n",
    "# Fresh model parameters; training below updates these in place.\n",
    "params = get_params()\n",
    "\n",
    "# Hyperparameters: epochs, steps per sample, batch size, learning rate, clip norm.\n",
    "num_epochs, num_steps, batch_size, lr, clipping_theta = 100, 3, 10, 1e1, 1e-2\n",
    "\n",
    "\n",
    "# Train with random sampling (True); loss is printed every second epoch.\n",
    "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                       ctx, corpus_indices, True, num_epochs, num_steps, lr,\n",
    "                      clipping_theta, batch_size, \n",
    "                      prefixes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 293,
   "id": "83c4633f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-declare the model sizes (unchanged from the training cell).\n",
    "num_inputs, num_hiddens, num_outputs = 4, 100, 1\n",
    "# Run the trained RNN over the prediction inputs; `cout` holds one entry per\n",
    "# row of `prefixes` (each entry is a list of output arrays).\n",
    "cout=predict_rnn(prefixes, rnn, params, init_rnn_state,\n",
    "                                     num_hiddens,  ctx)\n",
    "\n",
    "# Scratch version of the same loop, kept commented for reference:\n",
    "#params = [W_xh, W_hh, b_h, W_hq, b_q]\n",
    "# state = init_rnn_state(1, num_hiddens, ctx)\n",
    "# output1 = prefixes\n",
    "\n",
    "# outt=[]\n",
    "# for t in range(0,199):\n",
    "#     X = [output1[t,:]]\n",
    "# #     print(X)\n",
    "# #     print(params[0])\n",
    "#     (Y, state) = rnn(X, state, params)\n",
    "#     outt.append(Y)\n",
    "# print(outt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 295,
   "id": "d19cd0d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 65.59674   34.25896   43.146492 ... 773.5844   773.5844   773.5844  ]\n"
     ]
    }
   ],
   "source": [
    "# Flatten the nested prediction structure produced by predict_rnn:\n",
    "# cout -> per-step output list -> output array -> scalar.\n",
    "opp=[]\n",
    "for a in cout:\n",
    "    for c1 in a:\n",
    "        for c2 in c1:\n",
    "            opp.append(c2[0].asscalar())\n",
    "        \n",
    "\n",
    "import numpy as np  # NOTE(review): consider moving imports to the first cell\n",
    "a1=nd.array(opp)\n",
    "a2=a1.asnumpy()\n",
    "print(a2)\n",
    "# NOTE(review): fmt='%d' truncates the float predictions to integers when\n",
    "# written to disk -- confirm this is intended.\n",
    "np.savetxt(\"./result.txt\",a2,fmt='%d')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09176ee2",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:gluon] *",
   "language": "python",
   "name": "conda-env-gluon-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
