{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "2858ba42", "metadata": { "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: tensorflow>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (2.12.0)\n", "Requirement already satisfied: tensorflow-intel==2.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow>=2.0.0) (2.12.0)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip." 
] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: astunparse>=1.6.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.6.3)\n", "Collecting keras<2.13,>=2.12.0\n", " Using cached keras-2.12.0-py2.py3-none-any.whl (1.7 MB)\n", "Requirement already satisfied: h5py>=2.9.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.0)\n", "Requirement already satisfied: jax>=0.3.15 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.10)\n", "Requirement already satisfied: gast<=0.4.0,>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.0)\n", "Collecting tensorflow-estimator<2.13,>=2.12.0\n", " Using cached tensorflow_estimator-2.12.0-py2.py3-none-any.whl (440 kB)\n", "Requirement already satisfied: typing-extensions>=3.6.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.6.2)\n", "Requirement already satisfied: flatbuffers>=2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.0.7)\n", "Requirement already satisfied: setuptools in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (56.0.0)\n", "Requirement already satisfied: six>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.15.0)\n", "Requirement already satisfied: packaging in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from 
tensorflow-intel==2.12.0->tensorflow>=2.0.0) (23.0)\n", "Requirement already satisfied: termcolor>=1.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.1.0)\n", "Requirement already satisfied: numpy<1.24,>=1.22 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.23.5)\n", "Requirement already satisfied: google-pasta>=0.1.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.2.0)\n", "Requirement already satisfied: libclang>=13.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (16.0.0)\n", "Requirement already satisfied: tensorboard<2.13,>=2.12 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.12.3)\n", "Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.54.2)\n", "Requirement already satisfied: wrapt<1.15,>=1.11.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.12.1)\n", "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.31.0)\n", "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.20.3)\n", "Requirement already satisfied: absl-py>=1.0.0 in 
c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.4.0)\n", "Requirement already satisfied: opt-einsum>=2.3.2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.3.0)\n", "Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from astunparse>=1.6.0->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.37.0)\n", "Requirement already satisfied: scipy>=1.7 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.10.1)\n", "Requirement already satisfied: ml-dtypes>=0.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.1.0)\n", "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.7.0)\n", "Requirement already satisfied: google-auth<3,>=1.6.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.19.0)\n", "Requirement already satisfied: requests<3,>=2.21.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.28.2)\n", "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.0.0)\n", "Requirement already satisfied: werkzeug>=1.0.1 in 
c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.0.2)\n", "Requirement already satisfied: markdown>=2.6.8 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.3.4)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.7.2)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.2.8)\n", "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.2.4)\n", "Requirement already satisfied: urllib3<2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.26.15)\n", "Requirement already satisfied: requests-oauthlib>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.3.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2022.12.7)\n", "Requirement already satisfied: idna<4,>=2.5 in 
c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.10)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.0)\n", "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.8)\n", "Requirement already satisfied: oauthlib>=3.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.1)\n", "Installing collected packages: tensorflow-estimator, keras\n", " Attempting uninstall: tensorflow-estimator\n", " Found existing installation: tensorflow-estimator 2.6.0\n", " Uninstalling tensorflow-estimator-2.6.0:\n", " Successfully uninstalled tensorflow-estimator-2.6.0\n", " Attempting uninstall: keras\n", " Found existing installation: keras 2.6.0\n", " Uninstalling keras-2.6.0:\n", " Successfully uninstalled keras-2.6.0\n", "Successfully installed keras-2.12.0 tensorflow-estimator-2.12.0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Collecting tensorflow-hub" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", " Using cached tensorflow_hub-0.13.0-py2.py3-none-any.whl (100 kB)\n", "Requirement already 
satisfied: protobuf>=3.19.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-hub) (3.20.3)\n", "Requirement already satisfied: numpy>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-hub) (1.23.5)\n", "Installing collected packages: tensorflow-hub\n", "Successfully installed tensorflow-hub-0.13.0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: protobuf==3.20.* in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (3.20.3)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf 
(c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Collecting tensorflow-estimator==2.6.0\n", " Using cached tensorflow_estimator-2.6.0-py2.py3-none-any.whl (462 kB)\n", "Installing collected packages: tensorflow-estimator\n", " Attempting uninstall: tensorflow-estimator\n", " Found existing installation: tensorflow-estimator 2.12.0\n", " Uninstalling tensorflow-estimator-2.12.0:\n", " Successfully uninstalled tensorflow-estimator-2.12.0\n", "Successfully installed tensorflow-estimator-2.6.0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", "tensorflow-intel 2.12.0 requires tensorflow-estimator<2.13,>=2.12.0, but you have tensorflow-estimator 2.6.0 which is incompatible.\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Collecting keras==2.6.0\n", " Using cached keras-2.6.0-py2.py3-none-any.whl (1.3 MB)\n", "Installing collected packages: keras\n", " Attempting uninstall: keras" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", "tensorflow-intel 2.12.0 requires keras<2.13,>=2.12.0, but you have keras 2.6.0 which is incompatible.\n", "tensorflow-intel 2.12.0 requires tensorflow-estimator<2.13,>=2.12.0, but you have tensorflow-estimator 2.6.0 which is incompatible.\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", " Found existing installation: keras 2.12.0\n", " Uninstalling keras-2.12.0:\n", " Successfully uninstalled keras-2.12.0\n", "Successfully installed keras-2.6.0\n" ] } ], "source": [ "! pip install \"tensorflow>=2.0.0\"\n", "! pip install tensorflow-hub\n", "! pip install protobuf==3.20.*\n", "! pip install --upgrade tensorflow-estimator==2.12.0\n", "! 
pip install --upgrade keras==2.12.0" ] }, { "cell_type": "code", "execution_count": null, "id": "80c2b648", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 4, "id": "c8482712", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: flatbuffers>=2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (2.0.7)\n", "Requirement already satisfied: absl-py>=1.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.4.0)\n", "Requirement already satisfied: setuptools in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (56.0.0)\n", "Requirement already satisfied: termcolor>=1.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.1.0)\n", "Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from astunparse>=1.6.0->tensorflow-intel==2.12.0->tensorflow) (0.37.0)\n", "Requirement already satisfied: scipy>=1.7 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow) (1.10.1)\n", "Requirement already satisfied: ml-dtypes>=0.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow) (0.1.0)\n", "Requirement already satisfied: requests<3,>=2.21.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.28.2)\n", "Requirement already satisfied: werkzeug>=1.0.1 in 
c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.0.2)\n", "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.7.0)\n", "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (1.0.0)\n", "Requirement already satisfied: google-auth<3,>=1.6.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.19.0)\n", "Requirement already satisfied: markdown>=2.6.8 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.3.4)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.2.8)\n", "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (4.2.4)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (4.7.2)\n", "Requirement already satisfied: urllib3<2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) 
(1.26.15)\n", "Requirement already satisfied: requests-oauthlib>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (1.3.0)\n", "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.10)\n", "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2022.12.7)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.1.0)\n", "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.4.8)\n", "Requirement already satisfied: oauthlib>=3.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.1.1)\n", "Installing collected packages: tensorflow-estimator, numpy, keras\n", " Attempting uninstall: tensorflow-estimator\n", " Found existing installation: tensorflow-estimator 2.6.0\n", " Uninstalling tensorflow-estimator-2.6.0:\n", " Successfully uninstalled tensorflow-estimator-2.6.0\n", " Attempting uninstall: numpy\n", " Found existing installation: numpy 1.19.5\n", " Uninstalling numpy-1.19.5:\n", " Successfully uninstalled numpy-1.19.5\n", " Attempting uninstall: keras\n", " 
Found existing installation: keras 2.6.0\n", " Uninstalling keras-2.6.0:\n", " Successfully uninstalled keras-2.6.0\n", "Successfully installed keras-2.12.0 numpy-1.23.5 tensorflow-estimator-2.12.0\n" ] }, { "ename": "ImportError", "evalue": "cannot import name 'dnn_logit_fn_builder' from partially initialized module 'tensorflow_estimator.python.estimator.canned.dnn' (most likely due to a circular import) (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py)", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mre\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mtensorflow_hub\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mhub\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mopenai\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;31m#import gradio as gr\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_hub\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 88\u001b[0m \u001b[1;31m# pylint: disable=g-import-not-at-top\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 89\u001b[0m \u001b[1;31m# pylint: 
disable=g-bad-import-order\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 90\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLatestModuleExporter\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 91\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mregister_module_for_export\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 92\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeature_column\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mimage_embedding_column\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_hub\\estimator.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 61\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 62\u001b[1;33m \u001b[1;32mclass\u001b[0m \u001b[0mLatestModuleExporter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtf_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExporter\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 63\u001b[0m \"\"\"Regularly exports registered modules into timestamped directories.\n\u001b[0;32m 64\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow\\python\\util\\lazy_loader.py\u001b[0m in \u001b[0;36m__getattr__\u001b[1;34m(self, item)\u001b[0m\n\u001b[0;32m 56\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[1;32mdef\u001b[0m 
\u001b[0m__getattr__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mitem\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 58\u001b[1;33m \u001b[0mmodule\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 59\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mitem\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow\\python\\util\\lazy_loader.py\u001b[0m in \u001b[0;36m_load\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 39\u001b[0m \u001b[1;34m\"\"\"Load the module and insert it into the parent's globals.\"\"\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[1;31m# Import the target module and insert it into the parent's namespace\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 41\u001b[1;33m \u001b[0mmodule\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimport_module\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 42\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_parent_module_globals\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_local_name\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 
43\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\importlib\\__init__.py\u001b[0m in \u001b[0;36mimport_module\u001b[1;34m(name, package)\u001b[0m\n\u001b[0;32m 125\u001b[0m \u001b[1;32mbreak\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 126\u001b[0m \u001b[0mlevel\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 127\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_bootstrap\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_gcd_import\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlevel\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpackage\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 128\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 129\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m 
\u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodule_wrapper\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_module_wrapper\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\_v1\\estimator\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexperimental\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexport\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m 
\u001b[1;32mimport\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\_v1\\estimator\\experimental\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdnn\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdnn_logit_fn_builder\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkmeans\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mKMeansClustering\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mKMeans\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLinearSDCA\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py\u001b[0m in 
\u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 24\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeature_column\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mfeature_column_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mops\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 26\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 27\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mhead\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mhead_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 28\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0moptimizers\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 50\u001b[0m \u001b[1;32mfrom\u001b[0m 
\u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtf_contextlib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtools\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdocs\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdoc_controls\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 52\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodel_fn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mmodel_fn_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 53\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mrun_config\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 54\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mutil\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mestimator_util\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m 
\u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodule_wrapper\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_module_wrapper\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\_api\\v1\\estimator\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexperimental\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexport\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m 
\u001b[1;32mimport\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\_api\\v1\\estimator\\experimental\\__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdnn\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdnn_logit_fn_builder\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkmeans\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mKMeansClustering\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mKMeans\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLinearSDCA\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;31mImportError\u001b[0m: cannot import name 'dnn_logit_fn_builder' from partially initialized module 'tensorflow_estimator.python.estimator.canned.dnn' (most likely due to a circular import) 
(c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py)" ] } ], "source": [ "import urllib.request\n", "import fitz\n", "import re\n", "import numpy as np\n", "import tensorflow_hub as hub\n", "import openai\n", "#import gradio as gr\n", "import os\n", "from sklearn.neighbors import NearestNeighbors\n", "\n", "def download_pdf(url, output_path):\n", " urllib.request.urlretrieve(url, output_path)\n", "\n", "\n", "def preprocess(text):\n", " text = text.replace('\\n', ' ')\n", " text = re.sub('\\s+', ' ', text)\n", " return text\n", "\n", "\n", "def pdf_to_text(path, start_page=1, end_page=None):\n", " doc = fitz.open(path)\n", " total_pages = doc.page_count\n", "\n", " if end_page is None:\n", " end_page = total_pages\n", "\n", " text_list = []\n", "\n", " for i in range(start_page-1, end_page):\n", " text = doc.load_page(i).get_text(\"text\")\n", " text = preprocess(text)\n", " text_list.append(text)\n", "\n", " doc.close()\n", " return text_list\n", "\n", "\n", "def text_to_chunks(texts, word_length=150, start_page=1):\n", " text_toks = [t.split(' ') for t in texts]\n", " page_nums = []\n", " chunks = []\n", " \n", " for idx, words in enumerate(text_toks):\n", " for i in range(0, len(words), word_length):\n", " chunk = words[i:i+word_length]\n", " if (i+word_length) > len(words) and (len(chunk) < word_length) and (\n", " len(text_toks) != (idx+1)):\n", " text_toks[idx+1] = chunk + text_toks[idx+1]\n", " continue\n", " chunk = ' '.join(chunk).strip()\n", " chunk = f'[Page no. 
{idx+start_page}]' + ' ' + '\"' + chunk + '\"'\n", " chunks.append(chunk)\n", " return chunks\n", "\n", "class SemanticSearch:\n", " \n", " def __init__(self):\n", " self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')\n", " self.fitted = False\n", " \n", " \n", " def fit(self, data, batch=1000, n_neighbors=5):\n", " self.data = data\n", " self.embeddings = self.get_text_embedding(data, batch=batch)\n", " n_neighbors = min(n_neighbors, len(self.embeddings))\n", " self.nn = NearestNeighbors(n_neighbors=n_neighbors)\n", " self.nn.fit(self.embeddings)\n", " self.fitted = True\n", " \n", " \n", " def __call__(self, text, return_data=True):\n", " inp_emb = self.use([text])\n", " neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]\n", " \n", " if return_data:\n", " return [self.data[i] for i in neighbors]\n", " else:\n", " return neighbors\n", " \n", " \n", " def get_text_embedding(self, texts, batch=1000):\n", " embeddings = []\n", " for i in range(0, len(texts), batch):\n", " text_batch = texts[i:(i+batch)]\n", " emb_batch = self.use(text_batch)\n", " embeddings.append(emb_batch)\n", " embeddings = np.vstack(embeddings)\n", " return embeddings\n", "\n", "\n", "\n", "def load_recommender(path, start_page=1):\n", " global recommender\n", " texts = pdf_to_text(path, start_page=start_page)\n", " chunks = text_to_chunks(texts, start_page=start_page)\n", " recommender.fit(chunks)\n", " return 'Corpus Loaded.'\n", "\n", "def generate_text(openAI_key,prompt, engine=\"text-davinci-003\"):\n", " openai.api_key = openAI_key\n", " completions = openai.Completion.create(\n", " engine=engine,\n", " prompt=prompt,\n", " max_tokens=512,\n", " n=1,\n", " stop=None,\n", " temperature=0.7,\n", " )\n", " message = completions.choices[0].text\n", " return message\n", "\n", "def generate_answer(question,openAI_key):\n", " topn_chunks = recommender(question)\n", " prompt = \"\"\n", " prompt += 'search results:\\n\\n'\n", " for c in topn_chunks:\n", " 
prompt += c + '\\n\\n'\n", " \n", " prompt += \"Instructions: Compose a comprehensive reply to the query using the search results given. \"\\\n", " \"Cite each reference using [ Page Number] notation (every result has this number at the beginning). \"\\\n", " \"Citation should be done at the end of each sentence. If the search results mention multiple subjects \"\\\n", " \"with the same name, create separate answers for each. Only include information found in the results and \"\\\n", " \"don't add any additional information. Make sure the answer is correct and don't output false content. \"\\\n", " \"If the text does not relate to the query, simply state 'Text Not Found in PDF'. Ignore outlier \"\\\n", " \"search results which has nothing to do with the question. Only answer what is asked. The \"\\\n", " \"answer should be short and concise. Answer step-by-step. \\n\\n\"\n", " \n", " prompt += f\"Query: {question}\\nAnswer:\"\n", " answer = generate_text(openAI_key, prompt,\"text-davinci-003\")\n", " return answer\n", "\n", "\n", "def question_answer(url, file, question,openAI_key):\n", " if openAI_key.strip()=='':\n", " return '[ERROR]: Please enter your Open AI Key. Get your key here : https://platform.openai.com/account/api-keys'\n", " if url.strip() == '' and file == None:\n", " return '[ERROR]: Both URL and PDF is empty. Provide at least one.'\n", " \n", " if url.strip() != '' and file != None:\n", " return '[ERROR]: Both URL and PDF is provided. 
Please provide only one (either URL or PDF).'\n", "\n", " if url.strip() != '':\n", " glob_url = url\n", " download_pdf(glob_url, 'corpus.pdf')\n", " load_recommender('corpus.pdf')\n", "\n", " else:\n", " old_file_name = file.name\n", " file_name = file.name\n", " file_name = file_name[:-12] + file_name[-4:]\n", " os.rename(old_file_name, file_name)\n", " load_recommender(file_name)\n", "\n", " if question.strip() == '':\n", " return '[ERROR]: Question field is empty'\n", "\n", " return generate_answer(question,openAI_key)\n", "\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "03db969c", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting numpy==1.19.*\n", " Using cached numpy-1.19.5-cp39-cp39-win_amd64.whl (13.3 MB)\n", "Installing collected packages: numpy\n", " Attempting uninstall: numpy" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "ERROR: Could not install packages due to an OSError: [WinError 5] Access is denied: 'C:\\\\Users\\\\harsh\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python39\\\\Lib\\\\site-packages\\\\numpy\\\\~libs\\\\libopenblas.FB5AE2TYXYH2IJRDKGDGQ3XBKLKTF43H.gfortran-win_amd64.dll'\n", "Consider using the `--user` option or check the permissions.\n", "\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf 
(c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", " Found existing installation: numpy 1.23.5\n", " Uninstalling numpy-1.23.5:\n", " Successfully uninstalled numpy-1.23.5\n", "Requirement already satisfied: tensorflow in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (2.12.0)" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "ERROR: pip's dependency resolver does not currently take into account all the packages that 
are installed. This behaviour is the source of the following dependency conflicts.\n", "openai-whisper 20230314 requires tiktoken==0.3.1, but you have tiktoken 0.3.3 which is incompatible." ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Requirement already satisfied: tensorflow-intel==2.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow) (2.12.0)\n", "Collecting keras<2.13,>=2.12.0\n", " Using cached keras-2.12.0-py2.py3-none-any.whl (1.7 MB)\n", "Requirement already satisfied: h5py>=2.9.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.1.0)\n", "Collecting tensorflow-estimator<2.13,>=2.12.0\n", " Using cached tensorflow_estimator-2.12.0-py2.py3-none-any.whl (440 kB)\n", "Requirement already satisfied: typing-extensions>=3.6.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (4.6.2)\n", "Collecting numpy<1.24,>=1.22\n", " Using cached numpy-1.23.5-cp39-cp39-win_amd64.whl (14.7 MB)\n", "Requirement already satisfied: google-pasta>=0.1.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.2.0)\n", "Requirement already satisfied: packaging in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (23.0)\n", "Requirement already satisfied: astunparse>=1.6.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.6.3)\n", "Requirement already satisfied: opt-einsum>=2.3.2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.3.0)\n", "Requirement already satisfied: libclang>=13.0.0 in 
c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (16.0.0)\n", "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.20.3)\n", "Requirement already satisfied: six>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.15.0)\n", "Requirement already satisfied: wrapt<1.15,>=1.11.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.12.1)\n", "Requirement already satisfied: tensorboard<2.13,>=2.12 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (2.12.3)\n", "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.31.0)\n", "Requirement already satisfied: gast<=0.4.0,>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.4.0)\n", "Requirement already satisfied: jax>=0.3.15 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.4.10)\n", "Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.54.2)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "argilla 1.6.0 requires wrapt<1.15,>=1.13, but you have wrapt 1.12.1 which is incompatible.\n", "WARNING: Ignoring invalid distribution -rotobuf 
(c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n", "WARNING: There was an error checking the latest version of pip.\n" ] } ], "source": [ "!pip install --upgrade numpy==1.19.*\n", "!pip install --upgrade tensorflow" ] }, { "cell_type": "code", "execution_count": 4, "id": "7bdf8293", "metadata": {}, "outputs": [], "source": [ "class SemanticSearch:\n", " \n", " def __init__(self):\n", " self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')\n", " self.fitted = False\n", " \n", " \n", " def fit(self, data, batch=1000, n_neighbors=5):\n", " self.data = data\n", " self.embeddings = self.get_text_embedding(data, batch=batch)\n", " n_neighbors = min(n_neighbors, len(self.embeddings))\n", " self.nn = NearestNeighbors(n_neighbors=n_neighbors)\n", " self.nn.fit(self.embeddings)\n", " self.fitted = True\n", " \n", " \n", " def __call__(self, text, return_data=True):\n", " inp_emb = self.use([text])\n", " neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]\n", " \n", " if return_data:\n", " return [self.data[i] for i in neighbors]\n", " else:\n", " return neighbors\n", " \n", " \n", " def get_text_embedding(self, texts, batch=1000):\n", " embeddings = []\n", " for i in range(0, len(texts), batch):\n", " text_batch = texts[i:(i+batch)]\n", " emb_batch = self.use(text_batch)\n", " embeddings.append(emb_batch)\n", " embeddings = np.vstack(embeddings)\n", " return embeddings\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 15, "id": "2f29da6b", "metadata": {}, "outputs": [], "source": [ "def download_pdf(url, output_path):\n", " urllib.request.urlretrieve(url, output_path)\n", "\n", "\n", "def preprocess(text):\n", " text = 
text.replace('\\n', ' ')\n", " text = re.sub('\\s+', ' ', text)\n", " return text\n", "\n", "\n", "def pdf_to_text(path, start_page=1, end_page=None):\n", " doc = fitz.open(path)\n", " total_pages = doc.page_count\n", "\n", " if end_page is None:\n", " end_page = total_pages\n", "\n", " text_list = []\n", "\n", " for i in range(start_page-1, end_page):\n", " text = doc.load_page(i).get_text(\"text\")\n", " text = preprocess(text)\n", " text_list.append(text)\n", "\n", " doc.close()\n", " return text_list\n", "\n", "\n", "def text_to_chunks(texts, word_length=150, start_page=1):\n", " text_toks = [t.split(' ') for t in texts]\n", " page_nums = []\n", " chunks = []\n", " \n", " for idx, words in enumerate(text_toks):\n", " for i in range(0, len(words), word_length):\n", " chunk = words[i:i+word_length]\n", " if (i+word_length) > len(words) and (len(chunk) < word_length) and (\n", " len(text_toks) != (idx+1)):\n", " text_toks[idx+1] = chunk + text_toks[idx+1]\n", " continue\n", " chunk = ' '.join(chunk).strip()\n", " chunk = f'[Page no. 
{idx+start_page}]' + ' ' + '\"' + chunk + '\"'\n", " chunks.append(chunk)\n", " return chunks\n", "\n", "\n", "def load_recommender(path, start_page=1):\n", " global recommender\n", " texts = pdf_to_text(path, start_page=start_page)\n", " chunks = text_to_chunks(texts, start_page=start_page)\n", " recommender.fit(chunks)\n", " return 'Corpus Loaded.'\n", "\n", "def generate_text(openAI_key,prompt, engine=\"text-davinci-003\"):\n", " openai.api_key = openAI_key\n", " completions = openai.Completion.create(\n", " engine=engine,\n", " prompt=prompt,\n", " max_tokens=512,\n", " n=1,\n", " stop=None,\n", " temperature=0.7,\n", " )\n", " message = completions.choices[0].text\n", " return message\n", "\n", "def generate_answer(question,openAI_key):\n", " topn_chunks = recommender(question)\n", " prompt = \"\"\n", " prompt += 'search results:\\n\\n'\n", " for c in topn_chunks:\n", " prompt += c + '\\n\\n'\n", " \n", " prompt += \"Instructions: Compose a comprehensive reply to the query using the search results given. \"\\\n", " \"Cite each reference using [ Page Number] notation (every result has this number at the beginning). \"\\\n", " \"Citation should be done at the end of each sentence. If the search results mention multiple subjects \"\\\n", " \"with the same name, create separate answers for each. Only include information found in the results and \"\\\n", " \"don't add any additional information. Make sure the answer is correct and don't output false content. \"\\\n", " \"If the text does not relate to the query, simply state 'Text Not Found in PDF'. Ignore outlier \"\\\n", " \"search results which has nothing to do with the question. Only answer what is asked. The \"\\\n", " \"answer should be short and concise. Answer step-by-step. 
\\n\\n\"\n", " \n", " prompt += f\"Query: {question}\\nAnswer:\"\n", " answer = generate_text(openAI_key, prompt,\"text-davinci-003\")\n", " return answer\n", "\n", "\n", "def question_answer(url, file, question,openAI_key):\n", " \n", " if openAI_key.strip()=='':\n", " return '[ERROR]: Please enter your Open AI Key. Get your key here : https://platform.openai.com/account/api-keys'\n", " if url.strip() == '' and file == None:\n", " return '[ERROR]: Both URL and PDF is empty. Provide at least one.'\n", " \n", " if url.strip() != '' and file != None:\n", " return '[ERROR]: Both URL and PDF is provided. Please provide only one (either URL or PDF).'\n", "\n", " if url.strip() != '':\n", " glob_url = url\n", " download_pdf(glob_url, 'corpus.pdf')\n", " load_recommender('corpus.pdf')\n", "\n", " else:\n", " old_file_name = file_loc\n", " file_name = file_loc\n", " file_name = file_name[:-12] + file_name[-4:]\n", " os.rename(old_file_name, file_name)\n", " load_recommender(file_name)\n", "\n", " if question.strip() == '':\n", " return '[ERROR]: Question field is empty'\n", "\n", " return generate_answer(question,openAI_key)\n", "\n", "\n", "recommender = SemanticSearch()\n", "\n" ] }, { "cell_type": "code", "execution_count": 16, "id": "01278c18", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<__main__.SemanticSearch at 0x226a75673a0>" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "recommender" ] }, { "cell_type": "markdown", "id": "88515cae", "metadata": {}, "source": [ "pip install --upgrade typing-extensions" ] }, { "cell_type": "code", "execution_count": 12, "id": "7adbc726", "metadata": {}, "outputs": [ { "ename": "PermissionError", "evalue": "[WinError 32] The process cannot access the file because it is being used by another process: 'userguide.pdf' -> 'u.pdf'", "output_type": "error", "traceback": [ 
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mPermissionError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mfile\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile_loc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[0mquestion\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'How to assign process to workflow'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 7\u001b[1;33m \u001b[0mans\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mquestion_answer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0murl\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfile_loc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mquestion\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mopenAI_key\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[1;32m\u001b[0m in \u001b[0;36mquestion_answer\u001b[1;34m(url, file_loc, question, openAI_key)\u001b[0m\n\u001b[0;32m 106\u001b[0m \u001b[0mfile_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfile_name\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m12\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mfile_name\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 107\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 108\u001b[1;33m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrename\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mold_file_name\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0mfile_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 109\u001b[0m \u001b[0mload_recommender\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;31mPermissionError\u001b[0m: [WinError 32] The process cannot access the file because it is being used by another process: 'userguide.pdf' -> 'u.pdf'" ] } ], "source": [ "import os\n",
"\n",
"# SECURITY: never hardcode an API key in a notebook. The key that used to be\n",
"# committed here is compromised and must be revoked; read it from the\n",
"# environment instead.\n",
"openAI_key = os.getenv('OPENAI_API_KEY', '')\n",
"url = ''\n",
"file_loc = 'userguide.pdf'\n",
"file = open(file_loc)\n",
"# Close the handle right away: question_answer() renames the PDF, and os.rename\n",
"# fails on Windows with [WinError 32] while the file is still open. The closed\n",
"# file object still carries .name, which is all that is needed downstream.\n",
"file.close()\n",
"question = 'How to assign process to workflow'\n",
"ans = question_answer(url, file, question, openAI_key)" ] }, { "cell_type": "code", "execution_count": null, "id": "3c1fe98d", "metadata": {}, "outputs": [], "source": [ "print(ans)" ] }, { "cell_type": "code", "execution_count": null, "id": "39eee11f", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 13, "id": "6f621d05", "metadata": {}, "outputs": [], "source": [ "import gradio as gr" ] }, { "cell_type": "code", "execution_count": 21, "id": "d3c2f47b", "metadata": {}, "outputs": [], "source": [ "title = ' AEGPT'\n",
"description = \"\"\" AE GPT allows you to chat with the PDF file using Universal Sentence Encoder and Open AI. The response even cites the page number in square brackets ([]) where the information is located. Upload any document and it will give you the correct answers to it.\"\"\"\n" ] }, { "cell_type": "code", "execution_count": 22, "id": "e683bb8d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7865\n", "Running on public URL: https://bfb35e477097c49460.gradio.live\n", "\n", "This share link expires in 72 hours. 
For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "with gr.Blocks() as demo:\n", "\n", " gr.Markdown(f'

{title}

')\n", " gr.Markdown(description)\n", "\n", " with gr.Row():\n", " \n", " with gr.Group():\n", " gr.Markdown(f'

Get your Open AI API key here

')\n", " openAI_key=gr.Textbox(label='Enter your OpenAI API key here')\n", " url = gr.Textbox(label='Enter PDF URL here')\n", " gr.Markdown(\"

OR

\")\n", " file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])\n", " question = gr.Textbox(label='Enter your question here')\n", " btn = gr.Button(value='Get Answer')\n", " btn.style(full_width=True)\n", "\n", " with gr.Group():\n", " answer = gr.Textbox(label='The answer to your question is :')\n", "\n", " btn.click(question_answer, inputs=[url, file, question,openAI_key], outputs=[answer])\n", "#openai.api_key = os.getenv('Your_Key_Here') \n", "demo.launch(share=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "037c4ea6", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.5" } }, "nbformat": 4, "nbformat_minor": 5 }