{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "is_executing": false
    }
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
    "import numpy as np\n",
    "import os\n",
    "import re\n",
    "import string\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
     "class MultiHeadSelfAttention(layers.Layer):\n",
     "    \"\"\"Multi-head self-attention with a causal (autoregressive) mask.\n",
     "\n",
     "    The embedding is split into `num_heads` heads of size\n",
     "    embed_dim // num_heads, scaled dot-product attention is applied per\n",
     "    head with a lower-triangular mask (each position attends only to\n",
     "    itself and earlier positions), and the heads are recombined by a\n",
     "    final Dense projection.\n",
     "    \"\"\"\n",
     "    def __init__(self,embed_dim,num_heads=8):\n",
     "        super(MultiHeadSelfAttention,self).__init__()\n",
     "        self.embed_dim = embed_dim\n",
     "        self.num_heads = num_heads\n",
     "        if embed_dim % num_heads != 0:\n",
     "            raise ValueError(\n",
     "                f\"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}\")\n",
     "        # Per-head width; the three projections below keep the full\n",
     "        # embed_dim and are reshaped into heads in separate_heads().\n",
     "        self.project_dim = embed_dim // num_heads\n",
     "        self.query_dense = layers.Dense(embed_dim)\n",
     "        self.key_dense = layers.Dense(embed_dim)\n",
     "        self.value_dense = layers.Dense(embed_dim)\n",
     "        self.combine_heads = layers.Dense(embed_dim)\n",
     "        \n",
     "    @staticmethod\n",
     "    def causal_attention_mask(n_dest,n_src,dtype):\n",
     "        # (n_dest, n_src) matrix whose entry (i, j) is 1 when destination\n",
     "        # position i may attend to source position j (lower-triangular\n",
     "        # band, shifted when n_src != n_dest) and 0 otherwise.\n",
     "        i = tf.range(n_dest)[:,None]\n",
     "        j = tf.range(n_src)\n",
     "        m = i >=j - n_src + n_dest\n",
     "        return tf.cast(m,dtype)\n",
     "    \n",
     "    def attention(self,query,key,value):\n",
     "        # Inputs arrive head-separated: (batch, num_heads, seq, project_dim),\n",
     "        # so the scores below are (batch, num_heads, seq_dest, seq_src).\n",
     "        score = tf.matmul(query,key,transpose_b=True)\n",
     "        # Scale by sqrt(d_k) to keep softmax inputs in a stable range.\n",
     "        dim_key = tf.cast(tf.shape(key)[-1],tf.float32)\n",
     "        scaled_score = score / tf.math.sqrt(dim_key)\n",
     "        \n",
     "        shape = tf.shape(scaled_score)\n",
     "        dim_dest,dim_src = shape[2],shape[3]\n",
     "        attention_mask = self.causal_attention_mask(\n",
     "            dim_dest,dim_src,scaled_score.dtype\n",
     "        )\n",
     "        \n",
     "        # Broadcast the mask over batch and head axes; masked positions get\n",
     "        # a large negative score (-1e4) so their softmax weight is ~0.\n",
     "        attention_mask = tf.reshape(attention_mask,[1,1,dim_dest,dim_src])\n",
     "        scaled_score = scaled_score * attention_mask - 1e4 * (1 - attention_mask)\n",
     "        \n",
     "        weights = tf.nn.softmax(scaled_score,axis=-1)\n",
     "        output = tf.matmul(weights,value)\n",
     "        return output,weights\n",
     "    \n",
     "    def separate_heads(self,x,batch_size):\n",
     "        # (batch, seq, embed_dim) -> (batch, num_heads, seq, project_dim).\n",
     "        x = tf.reshape(x,(batch_size,-1,self.num_heads,self.project_dim))\n",
     "        return tf.transpose(x,perm=[0,2,1,3])\n",
     "    \n",
     "    def call(self,inputs):\n",
     "        # inputs: (batch, seq, embed_dim); output has the same shape.\n",
     "        batch_size = tf.shape(inputs)[0]\n",
     "        query = self.query_dense(inputs)\n",
     "        key = self.key_dense(inputs)\n",
     "        value = self.value_dense(inputs)\n",
     "        \n",
     "        query = self.separate_heads(\n",
     "            query,batch_size\n",
     "        )\n",
     "        key = self.separate_heads(\n",
     "            key,batch_size\n",
     "        )\n",
     "        value = self.separate_heads(\n",
     "            value,batch_size\n",
     "        )\n",
     "        \n",
     "        attention,weights = self.attention(query,key,value)\n",
     "        # Undo the head split: back to (batch, seq, embed_dim).\n",
     "        attention = tf.transpose(\n",
     "            attention,perm=[0,2,1,3]\n",
     "        )\n",
     "        concat_attention = tf.reshape(\n",
     "            attention,(batch_size,-1,self.embed_dim)\n",
     "        )\n",
     "        \n",
     "        output = self.combine_heads(\n",
     "            concat_attention\n",
     "        )\n",
     "        return output\n",
     "        "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "class TransformerBlock(layers.Layer):\n",
    "    def __init__(self,embed_dim,num_heads,ff_dim,rate=0.1):\n",
    "        super(TransformerBlock,self).__init__()\n",
    "        self.att = MultiHeadSelfAttention(embed_dim,num_heads)\n",
    "        self.ffn = keras.Sequential(\n",
    "            [\n",
    "                layers.Dense(ff_dim,activation='relu'),\n",
    "                layers.Dense(embed_dim)\n",
    "            ]\n",
    "        )\n",
    "        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n",
    "        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n",
    "        self.dropout1 = layers.Dropout(rate)\n",
    "        self.dropout2 = layers.Dropout(rate)\n",
    "    \n",
    "    def call(self,inputs):\n",
    "        attention_output = self.att(inputs)\n",
    "        attention_output = self.dropout1(attention_output)\n",
    "        out1 = self.layernorm1(inputs + attention_output)\n",
    "        ffn_output = self.ffn(out1)\n",
    "        ffn_output = self.dropout2(ffn_output)\n",
    "        return self.layernorm2(out1 + ffn_output)\n",
    "        \n",
    "        "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [],
   "source": [
     "class TokenAndPositionEmbedding(layers.Layer):\n",
     "    \"\"\"Sum of a learned token embedding and a learned position embedding.\n",
     "\n",
     "    Token ids are embedded with one table (vocab_size x embed_dim) and\n",
     "    positions 0..maxlen-1 with another (maxlen x embed_dim); the layer\n",
     "    returns their elementwise sum.\n",
     "    \"\"\"\n",
     "    def __init__(self,maxlen,vocab_size,embed_dim):\n",
     "        super(TokenAndPositionEmbedding,self).__init__()\n",
     "        self.token_emb = layers.Embedding(input_dim=vocab_size,output_dim=embed_dim)\n",
     "        self.pos_emb = layers.Embedding(input_dim=maxlen,output_dim=embed_dim)\n",
     "    \n",
     "    \n",
     "    def call(self,x):\n",
     "        # Sequence length is taken from the last axis of the input ids.\n",
     "        maxlen = tf.shape(x)[-1]\n",
     "        positions = tf.range(start=0,limit=maxlen,delta=1)\n",
     "        positions = self.pos_emb(positions)\n",
     "        x = self.token_emb(x)\n",
     "        # Position embeddings (seq, dim) broadcast over the batch axis of x.\n",
     "        return x + positions\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "vocab_size = 20000\n",
     "maxlen = 100\n",
     "embed_dim = 256\n",
     "num_heads = 2\n",
     "feed_forward_dim = 256\n",
     "\n",
     "def create_model():\n",
     "    \"\"\"Build and compile the miniature GPT language model.\n",
     "\n",
     "    Returns a Keras model with two outputs: per-position vocabulary\n",
     "    logits (trained with sparse categorical cross-entropy) and the raw\n",
     "    transformer-block activations (no loss attached).\n",
     "    \"\"\"\n",
     "    inputs = layers.Input(shape=(maxlen,),dtype=tf.int32)\n",
     "    embedding_layer = TokenAndPositionEmbedding(maxlen,vocab_size,embed_dim)\n",
     "    x = embedding_layer(inputs)\n",
     "    transformer_block = TransformerBlock(embed_dim,num_heads,feed_forward_dim)\n",
     "    x = transformer_block(x)\n",
     "    \n",
     "    outputs = layers.Dense(vocab_size)(x)\n",
     "    # Second output `x` is exposed for inspection only; pairing it with a\n",
     "    # `None` loss below means no gradient is computed for it directly.\n",
     "    model = keras.Model(inputs=inputs,outputs=[outputs,x])\n",
     "    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
     "    model.compile(\n",
     "        'adam',loss=[loss_fn,None])\n",
     "    return model"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": true
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "text": [
      "50000 files\n"
     ],
     "output_type": "stream"
    }
   ],
   "source": [
    "batch_size = 32\n",
    "root_dir = r'E:\\nlp-data\\aclImdb_v1.tar\\aclImdb_v1\\aclImdb'\n",
    "\n",
    "filenames = []\n",
    "directories = [\n",
    "    \"train/pos\",\n",
    "    \"train/neg\",\n",
    "    \"test/pos\",\n",
    "    \"test/neg\",\n",
    "]\n",
    "\n",
    "for dir in directories:\n",
    "    dir = os.path.join(root_dir,dir)\n",
    "    for f in os.listdir(dir):\n",
    "        filenames.append(os.path.join(dir,f))\n",
    "        \n",
    "print(f\"{len(filenames)} files\")    \n",
    "\n",
    "random.shuffle(filenames)\n",
    "text_ds = tf.data.TextLineDataset(filenames)\n",
    "text_ds = text_ds.shuffle(buffer_size=256)\n",
    "text_ds = text_ds.batch(batch_size)\n",
    "\n",
    "\n",
    "def custom_standardization(input_string):\n",
    "    \"\"\" Remove html line-break tags and handle punctuation \"\"\"\n",
    "    lowercased = tf.strings.lower(input_string)\n",
    "    stripped_html = tf.strings.regex_replace(lowercased, \"<br />\", \" \")\n",
    "    return tf.strings.regex_replace(stripped_html, f\"([{string.punctuation}])\", r\" \\1\")\n",
    "\n",
    "vectorize_layer = TextVectorization(\n",
    "    standardize=custom_standardization,\n",
    "    max_tokens=vocab_size -1,\n",
    "    output_mode='int',\n",
    "    output_sequence_length=maxlen + 1,\n",
    ")\n",
    "\n",
    "vectorize_layer.adapt(text_ds)\n",
    "vocab = vectorize_layer.get_vocabulary()\n",
    "\n",
    "def prepare_lm_inputs_labels(text):\n",
    "    text = tf.expand_dims(text,-1)\n",
    "    tokenized_sentences = vectorize_layer(text)\n",
    "    x = tokenized_sentences[:,:-1]\n",
    "    y = tokenized_sentences[:,1:]\n",
    "    return x,y\n",
    "\n",
    "text_ds = text_ds.map(prepare_lm_inputs_labels)\n",
    "text_ds = text_ds.prefetch(tf.data.experimental.AUTOTUNE)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [],
   "source": [
    "class TextGenerator(keras.callbacks.Callback):\n",
    "    def __init__(self,max_tokens,start_tokens,index_to_word,top_k = 10,print_every=1):\n",
    "        self.max_tokens = max_tokens\n",
    "        self.start_tokens = start_tokens\n",
    "        self.index_to_word = index_to_word\n",
    "        self.print_every = print_every\n",
    "        self.k = top_k\n",
    "    \n",
    "    def sample_from(self,logits):\n",
    "        logits,indices = tf.math.top_k(logits,k=self.k,sorted=True)\n",
    "        indices = np.asarray(indices).astype(\"int32\")\n",
    "        preds = keras.activations.softmax(tf.expand_dims(logits,0))[0]\n",
    "        preds = np.asarray(preds).astype('float32')\n",
    "        return np.random.choice(indices,p=preds)\n",
    "    \n",
    "    def detokenize(self,number):\n",
    "        return self.index_to_word[number]\n",
    "    \n",
    "    \n",
    "    def on_epoch_end(self,epoch,logs=None):\n",
    "        start_tokens = [_ for _ in self.start_tokens]\n",
    "        if (epoch + 1) % self.print_every != 0 :\n",
    "            return \n",
    "        num_tokens_generated = 0\n",
    "        tokens_generated = []\n",
    "        while num_tokens_generated <= self.max_tokens:\n",
    "            pad_len = maxlen - len(start_tokens)\n",
    "            sample_index = len(start_tokens) - 1\n",
    "            if pad_len < 0:\n",
    "                x = start_tokens[:maxlen]\n",
    "                sample_index = maxlen - 1\n",
    "            elif pad_len > 0 :\n",
    "                x = start_tokens + [0] * pad_len\n",
    "            else:\n",
    "                x = start_tokens\n",
    "            \n",
    "            x = np.array([x])\n",
    "            y, _ = self.model.predict(x)\n",
    "            sample_token = self.sample_from(y[0][sample_index])\n",
    "            tokens_generated.append(sample_token)\n",
    "            start_tokens.append(sample_token)\n",
    "            num_tokens_generated = len(tokens_generated)\n",
    "        txt = \" \".join(\n",
    "            [self.detokenize(_) for _ in self.start_tokens + tokens_generated]\n",
    "        )\n",
    "        print(f\"generated text:\\n{txt}\\n\")\n",
    "            \n",
    "            \n",
    "word_to_index = {}\n",
    "for index, word in enumerate(vocab):\n",
    "    word_to_index[word] = index\n",
    "\n",
    "start_prompt = \"this movie is\"\n",
    "start_tokens = [word_to_index.get(_, 1) for _ in start_prompt.split()]\n",
    "num_tokens_generated = 40\n",
    "text_gen_callback = TextGenerator(num_tokens_generated, start_tokens, vocab)   \n",
    "    "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "model = create_model()\n",
     "\n",
     "# Train for one epoch; the callback prints a sampled continuation of the\n",
     "# prompt at the end of the epoch.\n",
     "model.fit(text_ds, epochs=1, callbacks=[text_gen_callback])\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": true
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}