{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "37657339",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'1.12.0.post2'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "torch.__version__"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "36318e17",
   "metadata": {},
   "source": [
     "# Embedding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "df0ab246",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# torch embedding -- constant-initialized so the lookup result is predictable\n",
     "\n",
     "embed_torch = torch.nn.Embedding(num_embeddings=512, embedding_dim=10)\n",
     "torch.nn.init.constant_(embed_torch.weight, val=0.1)\n",
     "\n",
     "# randint(3, 4, ...) always yields index 3 (the high bound is exclusive)\n",
     "i_p = np.random.randint(3, 4, (1, 1))\n",
     "i_p = torch.from_numpy(i_p)\n",
     "\n",
     "embed_torch(i_p)\n",
     "\"\"\"\n",
     "tensor([[[0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000, 0.1000,\n",
     "          0.1000, 0.1000]]], grad_fn=<EmbeddingBackward0>)\n",
     "\"\"\"\n",
     "\n",
     "# tensorflow keras -- same constant init, same index, to compare with torch\n",
     "k_init = tf.keras.initializers.Constant(value=0.1)\n",
     "embed_keras = tf.keras.layers.Embedding(input_dim=512, output_dim=10, embeddings_initializer=k_init)\n",
     "embed_keras.build(input_shape=(None, 1))\n",
     "i_p = np.random.randint(3, 4, (1, 1))\n",
     "embed_keras(i_p)\n",
     "\"\"\" <tf.Tensor: shape=(1, 1, 10), dtype=float32, numpy=\n",
     "array([[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]],\n",
     "      dtype=float32)> \"\"\"\n",
     "\n",
     "# trailing 1 makes the cell output `1` instead of the tensor repr\n",
     "1 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "77bdbb08",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 72,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lstm_torch = torch.nn.LSTM(input_size=10, hidden_size=8, batch_first=True)\n",
    "for w in lstm_torch.all_weights[0]:\n",
    "    torch.nn.init.constant_(w, val=0.1)\n",
    "i_p = np.random.random((1, 10))\n",
    "i_p.fill(0.2)\n",
    "i_p = torch.from_numpy(i_p)\n",
    "i_p = i_p.to(torch.float32)\n",
    "output, (cell, hidden) = lstm_torch(i_p)\n",
    "\"\"\"\n",
    "output\n",
    "tensor([[0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339]],\n",
    "       grad_fn=<SqueezeBackward1>)\n",
    "       \n",
    "cell:\n",
    "tensor([[0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339, 0.1339]],\n",
    "       grad_fn=<SqueezeBackward1>)\n",
    "       \n",
    "hidden:  \n",
    "tensor([[0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275]],\n",
    "       grad_fn=<SqueezeBackward1>)\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "id": "52cc4ed9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([32, 8])"
      ]
     },
     "execution_count": 100,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lstm_torch.weight_hh_l0.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a12d52d6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "a68f20df",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275, 0.2275]],\n",
       "       grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "hidden"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "8fff7124",
   "metadata": {},
   "outputs": [],
   "source": [
    "# lstm_torch.all_weights[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "id": "cba348c2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<tf.Tensor: shape=(1, 8), dtype=float32, numpy=\n",
       " array([[0.02744377, 0.02744377, 0.02744377, 0.02744377, 0.02744377,\n",
       "         0.02744377, 0.02744377, 0.02744377]], dtype=float32)>,\n",
       " <tf.Tensor: shape=(1, 8), dtype=float32, numpy=\n",
       " array([[0.02744377, 0.02744377, 0.02744377, 0.02744377, 0.02744377,\n",
       "         0.02744377, 0.02744377, 0.02744377]], dtype=float32)>,\n",
       " <tf.Tensor: shape=(1, 8), dtype=float32, numpy=\n",
       " array([[0.05232362, 0.05232362, 0.05232362, 0.05232362, 0.05232362,\n",
       "         0.05232362, 0.05232362, 0.05232362]], dtype=float32)>]"
      ]
     },
     "execution_count": 103,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Keras LSTM with the same constant-0.1 weight init as the torch LSTM above.\n",
     "# With return_state=True the call returns [output, state_h, state_c].\n",
     "k_init = tf.keras.initializers.Constant(value=0.1)\n",
     "lstm_keras = tf.keras.layers.LSTM(units=8, \n",
     "                                 use_bias=False,\n",
     "                                 kernel_initializer=k_init,\n",
     "                                 recurrent_initializer=k_init, \n",
     "                                  return_state=True,\n",
     "                                )\n",
     "# lstm_keras.build(input_shape=(None, 10))\n",
     "# NOTE(review): the input here is filled with 0.1 while the torch cell above\n",
     "# uses 0.2, so the two recorded outputs (0.0274/0.0523 vs 0.1339/0.2275) are\n",
     "# not directly comparable -- confirm which constant was intended.\n",
     "i_p = np.random.random((1, 1, 10))\n",
     "i_p.fill(0.1)\n",
     "lstm_keras(i_p)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "id": "b81c498a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 0.02744377*0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "id": "0a415571",
   "metadata": {},
   "outputs": [],
   "source": [
     "W_x=lstm_keras.get_weights()[0] # input kernel; here (input_dim, 4*units) = (10, 32) -- the four gate kernels (i, f, c, o) are concatenated along the last axis\n",
     "W_h = lstm_keras.get_weights()[1] # recurrent kernel; here (units, 4*units) = (8, 32), again the four gates concatenated\n",
     "# bias = model.get_weights()[2] # bias, shape (4*units,); gate biases concatenated (not present here: use_bias=False above)\n",
     "\n",
     "w_ix=W_x[:,:32]  # NOTE(review): [:, :32] takes ALL 4*units columns (shape stays (10, 32), see output below), not just the input-gate block -- with units=8 the i-gate slice alone would be W_x[:, :8]\n",
     "w_ih=W_h[:,:32]  # NOTE(review): likewise the whole recurrent kernel, not the input-gate slice"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "id": "0fe285c8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(10, 32)"
      ]
     },
     "execution_count": 115,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "w_ix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "122f09fc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0312d05",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "id": "64a548f6",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'test2' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Input \u001b[0;32mIn [109]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m test2_t1\u001b[38;5;241m=\u001b[39m\u001b[43mtest2\u001b[49m[:,\u001b[38;5;241m0\u001b[39m,:]\n\u001b[1;32m      3\u001b[0m f_t1\u001b[38;5;241m=\u001b[39mactivation_relu(np\u001b[38;5;241m.\u001b[39mdot(test2_t1,w_fx)\u001b[38;5;241m+\u001b[39mb_f)\n\u001b[1;32m      4\u001b[0m i_t1\u001b[38;5;241m=\u001b[39mactivation_relu(np\u001b[38;5;241m.\u001b[39mdot(test2_t1,w_ix)\u001b[38;5;241m+\u001b[39mb_i)\n",
      "\u001b[0;31mNameError\u001b[0m: name 'test2' is not defined"
     ]
    }
   ],
   "source": [
     "# NOTE(review): this looks like a manual re-computation of one LSTM step from\n",
     "# the sliced kernels, but `test2`, `activation_relu`, `w_fx`, `w_cx`, `w_ox`\n",
     "# and `b_o` are never defined in this notebook, so the cell raises NameError\n",
     "# (see the stored traceback).  Define those names or delete the cell before a\n",
     "# Restart & Run All.\n",
     "test2_t1=test2[:,0,:]\n",
     "\n",
     "f_t1=activation_relu(np.dot(test2_t1,w_fx))\n",
     "i_t1=activation_relu(np.dot(test2_t1,w_ix))\n",
     "c_hat_t1=activation_relu(np.dot(test2_t1,w_cx))\n",
     "o_t1=activation_relu(np.dot(test2_t1,w_ox)+b_o)\n",
     "c_t1=np.multiply(i_t1,c_hat_t1)\n",
     "h_t1=np.multiply(o_t1,activation_relu(c_t1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d5be2f7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "id": "ca27f242",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1, 10)"
      ]
     },
     "execution_count": 86,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "i_p.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "id": "70c5b4ee",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "TensorShape([10, 32])"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lstm_keras.weights[0].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "0ff02147",
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "module 'keras.api._v2.keras.layers' has no attribute 'tf'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Input \u001b[0;32mIn [77]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mkeras\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayers\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtf\u001b[49m(units\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m8\u001b[39m, \n\u001b[1;32m      2\u001b[0m                                 use_bias\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m      3\u001b[0m \u001b[38;5;66;03m#                                  kernel_initializer=k_init,\u001b[39;00m\n\u001b[1;32m      4\u001b[0m \u001b[38;5;66;03m#                                  recurrent_initializer=k_init, \u001b[39;00m\n\u001b[1;32m      5\u001b[0m                                )\n",
      "\u001b[0;31mAttributeError\u001b[0m: module 'keras.api._v2.keras.layers' has no attribute 'tf'"
     ]
    }
   ],
   "source": [
    " tf.keras.layers.tf(units=8, \n",
    "                                 use_bias=False,\n",
    "#                                  kernel_initializer=k_init,\n",
    "#                                  recurrent_initializer=k_init, \n",
    "                                )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "id": "19e0c4aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf.keras.layers.LSTM?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5291989",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e5bd073",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bed1818c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
