{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ex-oKOQ95wAu",
    "outputId": "0eef58ac-1fb5-4879-df27-bc9378bef581"
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Y0NOzxRF5zzT"
   },
   "outputs": [],
   "source": [
    "from gensim.models import Word2Vec\n",
    "from keras.datasets import imdb\n",
    "\n",
    "# Load the IMDB dataset, keeping only the 10,000 most frequent words.\n",
    "(x_train, _), (x_test, _) = imdb.load_data(num_words=10000)\n",
    "\n",
    "# Build an index -> word lookup table.\n",
    "# NOTE: load_data reserves indices 0-2 for special tokens and shifts every\n",
    "# real word index up by 3 (index_from=3), so the raw word_index values must\n",
    "# be offset by +3 to line up with the indices actually found in x_train/x_test.\n",
    "word_index = imdb.get_word_index()\n",
    "index_to_word = {i + 3: word for word, i in word_index.items()}\n",
    "index_to_word[0] = '<PAD>'\n",
    "index_to_word[1] = '<START>'\n",
    "index_to_word[2] = '<UNK>'\n",
    "\n",
    "# Convert each sequence of word indices into a list of word tokens.\n",
    "# Unmapped indices fall back to '<UNK>' rather than '' so Word2Vec does not\n",
    "# learn a vector for the empty string.\n",
    "x_train = [[index_to_word.get(i, '<UNK>') for i in seq] for seq in x_train]\n",
    "x_test = [[index_to_word.get(i, '<UNK>') for i in seq] for seq in x_test]\n",
    "\n",
    "# Train the Word2Vec model on the combined corpus.\n",
    "w2v_model = Word2Vec(sentences=x_train + x_test, vector_size=100, window=5, min_count=1, workers=4, epochs=10)\n",
    "\n",
    "# Save the trained model. Note: Word2Vec.save() writes gensim's native\n",
    "# format (despite the .bin suffix); reload with Word2Vec.load(), not\n",
    "# KeyedVectors.load_word2vec_format().\n",
    "w2v_model.save('w2v_model.bin')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
