{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import os\n",
    "from transformers import OpenAIGPTTokenizer\n",
    "import tensorflow_text as tf_text\n",
    "from tqdm import trange"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Mixed precision compatibility check (mixed_float16): OK\n",
      "Your GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: NVIDIA GeForce RTX 2080 Ti, compute capability 7.5\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-07-24 11:10:11.820454: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "/home/roy/anaconda3/envs/tf/lib/python3.8/site-packages/keras/src/initializers/initializers.py:120: UserWarning: The initializer RandomNormal is unseeded and being called multiple times, which will return identical values each time (even if the initializer is unseeded). Please update your code to provide a seed to the initializer, or avoid using the same initializer instance more than once.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    }
   ],
   "source": [
     "# Load the trained GPT-1 model from disk for inference.\n",
     "# The \"not compiled\" warning in the output is expected: no optimizer/loss\n",
     "# configuration is needed since the model is only used for prediction here.\n",
     "model = tf.keras.models.load_model('saved_model/gpt1_model')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"gpt1\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " decoder (Decoder)           multiple                  74007552  \n",
      "                                                                 \n",
      " dense_36 (Dense)            multiple                  31127582  \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 105135134 (401.06 MB)\n",
      "Trainable params: 105135134 (401.06 MB)\n",
      "Non-trainable params: 0 (0.00 Byte)\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "# GPT-1 BPE tokenizer from Hugging Face; vocab_size is used later to slice\n",
     "# the model's output logits down to the valid token range.\n",
     "tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n",
     "vocab_size = len(tokenizer.get_vocab())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 43/43 [00:01<00:00, 38.25it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "it was saturday night, the street was packed with people and the girls were in the car. \n",
      " \" what's up? \" i asked. \n",
      " \" i'm not sure. \" \n",
      " \" what? \" \n",
      " \" i'm not sure. \"\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "#input_sentence = \"it was saturday night, the street was packed with people and the girls were in the car.\"\n",
    "#input_sentence = \"It was a sunny day and\"\n",
    "input_sentence = \"it was saturday night, the street\"\n",
    "token_id = tokenizer.encode(input_sentence)\n",
    "token_len = len(token_id)\n",
    "gen_seq_len = 50\n",
    "block_size = 512\n",
    "for i in trange(token_len, gen_seq_len):\n",
    "    input_data, mask = tf_text.pad_model_inputs(tf.reshape(tf.constant(token_id, tf.int64), [1,-1]), max_seq_length=block_size)\n",
    "    prediction, _ = model(input_data, training=False)\n",
    "    next_token_logit = prediction[0, len(token_id)-1, :vocab_size]\n",
    "    predict_token = tf.math.argmax(tf.math.softmax(next_token_logit)).numpy()\n",
    "    token_id.append(predict_token)\n",
    "print(tokenizer.decode(token_id))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'it was saturday night, the street was packed with people and the girls were in the car. \\n \" what\\'s up? \" i asked. \\n \" i\\'m not sure. \" \\n \" what? \" \\n \" i\\'m not sure. \"'"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Decode the generated ids back to text; bare last expression shows the\n",
     "# string repr (escaped newlines) via rich display.\n",
     "tokenizer.decode(token_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "it was saturday night, the street was packed with people and the girls were in the car. \n",
      " \" what's up? \" i asked. \n",
      " \" i'm not sure. \" \n",
      " \" what? \" \n",
      " \" i'm not sure. \"\n"
     ]
    }
   ],
   "source": [
     "# Print the decoded text so newlines render as line breaks instead of\n",
     "# appearing as escaped \\n in the string repr.\n",
     "print(tokenizer.decode(token_id))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[507,\n",
       " 509,\n",
       " 5375,\n",
       " 995,\n",
       " 240,\n",
       " 481,\n",
       " 1984,\n",
       " 509,\n",
       " 4960,\n",
       " 556,\n",
       " 989,\n",
       " 488,\n",
       " 481,\n",
       " 2182,\n",
       " 641,\n",
       " 500,\n",
       " 481,\n",
       " 1267,\n",
       " 239,\n",
       " 40477,\n",
       " 244,\n",
       " 599,\n",
       " 535,\n",
       " 609,\n",
       " 257,\n",
       " 244,\n",
       " 249,\n",
       " 864,\n",
       " 239,\n",
       " 40477,\n",
       " 244,\n",
       " 249,\n",
       " 719,\n",
       " 595,\n",
       " 881,\n",
       " 239,\n",
       " 244,\n",
       " 40477,\n",
       " 244,\n",
       " 599,\n",
       " 257,\n",
       " 244,\n",
       " 40477,\n",
       " 244,\n",
       " 249,\n",
       " 719,\n",
       " 595,\n",
       " 881,\n",
       " 239,\n",
       " 244]"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect the raw generated token id sequence (prompt + generated tokens).\n",
     "token_id"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.17"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
