Harshchan commited on
Commit
818ed57
1 Parent(s): 0e0a642

Upload Final_Notebook.ipynb

Browse files
Files changed (1) hide show
  1. Final_Notebook.ipynb +954 -0
Final_Notebook.ipynb ADDED
@@ -0,0 +1,954 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "2858ba42",
7
+ "metadata": {
8
+ "scrolled": false
9
+ },
10
+ "outputs": [
11
+ {
12
+ "name": "stdout",
13
+ "output_type": "stream",
14
+ "text": [
15
+ "Requirement already satisfied: tensorflow>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (2.12.0)\n",
16
+ "Requirement already satisfied: tensorflow-intel==2.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow>=2.0.0) (2.12.0)\n"
17
+ ]
18
+ },
19
+ {
20
+ "name": "stderr",
21
+ "output_type": "stream",
22
+ "text": [
23
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
24
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
25
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
26
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
27
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
28
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
29
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
30
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
31
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
32
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
33
+ "WARNING: There was an error checking the latest version of pip."
34
+ ]
35
+ },
36
+ {
37
+ "name": "stdout",
38
+ "output_type": "stream",
39
+ "text": [
40
+ "Requirement already satisfied: astunparse>=1.6.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.6.3)\n",
41
+ "Collecting keras<2.13,>=2.12.0\n",
42
+ " Using cached keras-2.12.0-py2.py3-none-any.whl (1.7 MB)\n",
43
+ "Requirement already satisfied: h5py>=2.9.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.0)\n",
44
+ "Requirement already satisfied: jax>=0.3.15 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.10)\n",
45
+ "Requirement already satisfied: gast<=0.4.0,>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.0)\n",
46
+ "Collecting tensorflow-estimator<2.13,>=2.12.0\n",
47
+ " Using cached tensorflow_estimator-2.12.0-py2.py3-none-any.whl (440 kB)\n",
48
+ "Requirement already satisfied: typing-extensions>=3.6.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.6.2)\n",
49
+ "Requirement already satisfied: flatbuffers>=2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.0.7)\n",
50
+ "Requirement already satisfied: setuptools in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (56.0.0)\n",
51
+ "Requirement already satisfied: six>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.15.0)\n",
52
+ "Requirement already satisfied: packaging in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (23.0)\n",
53
+ "Requirement already satisfied: termcolor>=1.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.1.0)\n",
54
+ "Requirement already satisfied: numpy<1.24,>=1.22 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.23.5)\n",
55
+ "Requirement already satisfied: google-pasta>=0.1.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.2.0)\n",
56
+ "Requirement already satisfied: libclang>=13.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (16.0.0)\n",
57
+ "Requirement already satisfied: tensorboard<2.13,>=2.12 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.12.3)\n",
58
+ "Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.54.2)\n",
59
+ "Requirement already satisfied: wrapt<1.15,>=1.11.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.12.1)\n",
60
+ "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.31.0)\n",
61
+ "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.20.3)\n",
62
+ "Requirement already satisfied: absl-py>=1.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.4.0)\n",
63
+ "Requirement already satisfied: opt-einsum>=2.3.2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.3.0)\n",
64
+ "Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from astunparse>=1.6.0->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.37.0)\n",
65
+ "Requirement already satisfied: scipy>=1.7 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.10.1)\n",
66
+ "Requirement already satisfied: ml-dtypes>=0.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.1.0)\n",
67
+ "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.7.0)\n",
68
+ "Requirement already satisfied: google-auth<3,>=1.6.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.19.0)\n",
69
+ "Requirement already satisfied: requests<3,>=2.21.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.28.2)\n",
70
+ "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.0.0)\n",
71
+ "Requirement already satisfied: werkzeug>=1.0.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.0.2)\n",
72
+ "Requirement already satisfied: markdown>=2.6.8 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.3.4)\n",
73
+ "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.7.2)\n",
74
+ "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.2.8)\n",
75
+ "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (4.2.4)\n",
76
+ "Requirement already satisfied: urllib3<2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.26.15)\n",
77
+ "Requirement already satisfied: requests-oauthlib>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (1.3.0)\n",
78
+ "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2022.12.7)\n",
79
+ "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (2.10)\n",
80
+ "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.0)\n",
81
+ "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (0.4.8)\n",
82
+ "Requirement already satisfied: oauthlib>=3.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow>=2.0.0) (3.1.1)\n",
83
+ "Installing collected packages: tensorflow-estimator, keras\n",
84
+ " Attempting uninstall: tensorflow-estimator\n",
85
+ " Found existing installation: tensorflow-estimator 2.6.0\n",
86
+ " Uninstalling tensorflow-estimator-2.6.0:\n",
87
+ " Successfully uninstalled tensorflow-estimator-2.6.0\n",
88
+ " Attempting uninstall: keras\n",
89
+ " Found existing installation: keras 2.6.0\n",
90
+ " Uninstalling keras-2.6.0:\n",
91
+ " Successfully uninstalled keras-2.6.0\n",
92
+ "Successfully installed keras-2.12.0 tensorflow-estimator-2.12.0\n"
93
+ ]
94
+ },
95
+ {
96
+ "name": "stderr",
97
+ "output_type": "stream",
98
+ "text": [
99
+ "\n"
100
+ ]
101
+ },
102
+ {
103
+ "name": "stdout",
104
+ "output_type": "stream",
105
+ "text": [
106
+ "Collecting tensorflow-hub"
107
+ ]
108
+ },
109
+ {
110
+ "name": "stderr",
111
+ "output_type": "stream",
112
+ "text": [
113
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)"
114
+ ]
115
+ },
116
+ {
117
+ "name": "stdout",
118
+ "output_type": "stream",
119
+ "text": [
120
+ "\n",
121
+ " Using cached tensorflow_hub-0.13.0-py2.py3-none-any.whl (100 kB)\n",
122
+ "Requirement already satisfied: protobuf>=3.19.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-hub) (3.20.3)\n",
123
+ "Requirement already satisfied: numpy>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-hub) (1.23.5)\n",
124
+ "Installing collected packages: tensorflow-hub\n",
125
+ "Successfully installed tensorflow-hub-0.13.0\n"
126
+ ]
127
+ },
128
+ {
129
+ "name": "stderr",
130
+ "output_type": "stream",
131
+ "text": [
132
+ "\n",
133
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
134
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
135
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
136
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
137
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
138
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
139
+ "WARNING: There was an error checking the latest version of pip.\n"
140
+ ]
141
+ },
142
+ {
143
+ "name": "stdout",
144
+ "output_type": "stream",
145
+ "text": [
146
+ "Requirement already satisfied: protobuf==3.20.* in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (3.20.3)\n"
147
+ ]
148
+ },
149
+ {
150
+ "name": "stderr",
151
+ "output_type": "stream",
152
+ "text": [
153
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
154
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
155
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
156
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
157
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
158
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
159
+ "WARNING: There was an error checking the latest version of pip.\n"
160
+ ]
161
+ },
162
+ {
163
+ "name": "stdout",
164
+ "output_type": "stream",
165
+ "text": [
166
+ "Collecting tensorflow-estimator==2.6.0\n",
167
+ " Using cached tensorflow_estimator-2.6.0-py2.py3-none-any.whl (462 kB)\n",
168
+ "Installing collected packages: tensorflow-estimator\n",
169
+ " Attempting uninstall: tensorflow-estimator\n",
170
+ " Found existing installation: tensorflow-estimator 2.12.0\n",
171
+ " Uninstalling tensorflow-estimator-2.12.0:\n",
172
+ " Successfully uninstalled tensorflow-estimator-2.12.0\n",
173
+ "Successfully installed tensorflow-estimator-2.6.0\n"
174
+ ]
175
+ },
176
+ {
177
+ "name": "stderr",
178
+ "output_type": "stream",
179
+ "text": [
180
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
181
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
182
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
183
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
184
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
185
+ "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
186
+ "tensorflow-intel 2.12.0 requires tensorflow-estimator<2.13,>=2.12.0, but you have tensorflow-estimator 2.6.0 which is incompatible.\n",
187
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
188
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
189
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
190
+ "WARNING: There was an error checking the latest version of pip.\n"
191
+ ]
192
+ },
193
+ {
194
+ "name": "stdout",
195
+ "output_type": "stream",
196
+ "text": [
197
+ "Collecting keras==2.6.0\n",
198
+ " Using cached keras-2.6.0-py2.py3-none-any.whl (1.3 MB)\n",
199
+ "Installing collected packages: keras\n",
200
+ " Attempting uninstall: keras"
201
+ ]
202
+ },
203
+ {
204
+ "name": "stderr",
205
+ "output_type": "stream",
206
+ "text": [
207
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
208
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
209
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
210
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
211
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
212
+ "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
213
+ "tensorflow-intel 2.12.0 requires keras<2.13,>=2.12.0, but you have keras 2.6.0 which is incompatible.\n",
214
+ "tensorflow-intel 2.12.0 requires tensorflow-estimator<2.13,>=2.12.0, but you have tensorflow-estimator 2.6.0 which is incompatible.\n",
215
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
216
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
217
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
218
+ "WARNING: There was an error checking the latest version of pip.\n"
219
+ ]
220
+ },
221
+ {
222
+ "name": "stdout",
223
+ "output_type": "stream",
224
+ "text": [
225
+ "\n",
226
+ " Found existing installation: keras 2.12.0\n",
227
+ " Uninstalling keras-2.12.0:\n",
228
+ " Successfully uninstalled keras-2.12.0\n",
229
+ "Successfully installed keras-2.6.0\n"
230
+ ]
231
+ }
232
+ ],
233
+ "source": [
234
+ "! pip install \"tensorflow>=2.0.0\"\n",
235
+ "! pip install tensorflow-hub\n",
236
+ "! pip install protobuf==3.20.*\n",
237
+ "! pip install --upgrade tensorflow-estimator==2.6.0\n",
238
+ "! pip install --upgrade keras==2.6.0"
239
+ ]
240
+ },
241
+ {
242
+ "cell_type": "code",
243
+ "execution_count": null,
244
+ "id": "80c2b648",
245
+ "metadata": {},
246
+ "outputs": [],
247
+ "source": []
248
+ },
249
+ {
250
+ "cell_type": "code",
251
+ "execution_count": 4,
252
+ "id": "c8482712",
253
+ "metadata": {},
254
+ "outputs": [
255
+ {
256
+ "name": "stdout",
257
+ "output_type": "stream",
258
+ "text": [
259
+ "Requirement already satisfied: flatbuffers>=2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (2.0.7)\n",
260
+ "Requirement already satisfied: absl-py>=1.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.4.0)\n",
261
+ "Requirement already satisfied: setuptools in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (56.0.0)\n",
262
+ "Requirement already satisfied: termcolor>=1.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.1.0)\n",
263
+ "Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from astunparse>=1.6.0->tensorflow-intel==2.12.0->tensorflow) (0.37.0)\n",
264
+ "Requirement already satisfied: scipy>=1.7 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow) (1.10.1)\n",
265
+ "Requirement already satisfied: ml-dtypes>=0.1.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from jax>=0.3.15->tensorflow-intel==2.12.0->tensorflow) (0.1.0)\n",
266
+ "Requirement already satisfied: requests<3,>=2.21.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.28.2)\n",
267
+ "Requirement already satisfied: werkzeug>=1.0.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.0.2)\n",
268
+ "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.7.0)\n",
269
+ "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (1.0.0)\n",
270
+ "Requirement already satisfied: google-auth<3,>=1.6.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.19.0)\n",
271
+ "Requirement already satisfied: markdown>=2.6.8 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.3.4)\n",
272
+ "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.2.8)\n",
273
+ "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (4.2.4)\n",
274
+ "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (4.7.2)\n",
275
+ "Requirement already satisfied: urllib3<2.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (1.26.15)\n",
276
+ "Requirement already satisfied: requests-oauthlib>=0.7.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (1.3.0)\n",
277
+ "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2.10)\n",
278
+ "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (2022.12.7)\n",
279
+ "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.1.0)\n",
280
+ "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (0.4.8)\n",
281
+ "Requirement already satisfied: oauthlib>=3.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.13,>=2.12->tensorflow-intel==2.12.0->tensorflow) (3.1.1)\n",
282
+ "Installing collected packages: tensorflow-estimator, numpy, keras\n",
283
+ " Attempting uninstall: tensorflow-estimator\n",
284
+ " Found existing installation: tensorflow-estimator 2.6.0\n",
285
+ " Uninstalling tensorflow-estimator-2.6.0:\n",
286
+ " Successfully uninstalled tensorflow-estimator-2.6.0\n",
287
+ " Attempting uninstall: numpy\n",
288
+ " Found existing installation: numpy 1.19.5\n",
289
+ " Uninstalling numpy-1.19.5:\n",
290
+ " Successfully uninstalled numpy-1.19.5\n",
291
+ " Attempting uninstall: keras\n",
292
+ " Found existing installation: keras 2.6.0\n",
293
+ " Uninstalling keras-2.6.0:\n",
294
+ " Successfully uninstalled keras-2.6.0\n",
295
+ "Successfully installed keras-2.12.0 numpy-1.23.5 tensorflow-estimator-2.12.0\n"
296
+ ]
297
+ },
298
+ {
299
+ "ename": "ImportError",
300
+ "evalue": "cannot import name 'dnn_logit_fn_builder' from partially initialized module 'tensorflow_estimator.python.estimator.canned.dnn' (most likely due to a circular import) (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py)",
301
+ "output_type": "error",
302
+ "traceback": [
303
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
304
+ "\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)",
305
+ "\u001b[1;32m<ipython-input-4-0409dcb688a2>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mre\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mtensorflow_hub\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mhub\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mopenai\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;31m#import gradio as gr\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
306
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_hub\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 88\u001b[0m \u001b[1;31m# pylint: disable=g-import-not-at-top\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 89\u001b[0m \u001b[1;31m# pylint: disable=g-bad-import-order\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 90\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLatestModuleExporter\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 91\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mregister_module_for_export\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 92\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_hub\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeature_column\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mimage_embedding_column\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
307
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_hub\\estimator.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 61\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 62\u001b[1;33m \u001b[1;32mclass\u001b[0m \u001b[0mLatestModuleExporter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtf_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExporter\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 63\u001b[0m \"\"\"Regularly exports registered modules into timestamped directories.\n\u001b[0;32m 64\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
308
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow\\python\\util\\lazy_loader.py\u001b[0m in \u001b[0;36m__getattr__\u001b[1;34m(self, item)\u001b[0m\n\u001b[0;32m 56\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__getattr__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mitem\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 58\u001b[1;33m \u001b[0mmodule\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 59\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mitem\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
309
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow\\python\\util\\lazy_loader.py\u001b[0m in \u001b[0;36m_load\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 39\u001b[0m \u001b[1;34m\"\"\"Load the module and insert it into the parent's globals.\"\"\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[1;31m# Import the target module and insert it into the parent's namespace\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 41\u001b[1;33m \u001b[0mmodule\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimport_module\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 42\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_parent_module_globals\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_local_name\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 43\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
310
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\importlib\\__init__.py\u001b[0m in \u001b[0;36mimport_module\u001b[1;34m(name, package)\u001b[0m\n\u001b[0;32m 125\u001b[0m \u001b[1;32mbreak\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 126\u001b[0m \u001b[0mlevel\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 127\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_bootstrap\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_gcd_import\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlevel\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpackage\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 128\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 129\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
311
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodule_wrapper\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_module_wrapper\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
312
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\_v1\\estimator\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexperimental\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexport\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapi\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_v1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
313
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\api\\_v1\\estimator\\experimental\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdnn\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdnn_logit_fn_builder\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkmeans\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mKMeansClustering\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mKMeans\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLinearSDCA\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
314
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 24\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeature_column\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mfeature_column_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mframework\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mops\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 26\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 27\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mhead\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mhead_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 28\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0moptimizers\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
315
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\estimator.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 50\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtf_contextlib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtools\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdocs\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdoc_controls\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 52\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodel_fn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mmodel_fn_lib\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 53\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mrun_config\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 54\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mutil\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mestimator_util\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
316
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mestimator\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmodule_wrapper\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_module_wrapper\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
317
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\_api\\v1\\estimator\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexperimental\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mexport\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mv1\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
318
+ "\u001b[1;32mc:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\_api\\v1\\estimator\\experimental\\__init__.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 8\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdnn\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdnn_logit_fn_builder\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 9\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkmeans\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mKMeansClustering\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mKMeans\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mtensorflow_estimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanned\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mLinearSDCA\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
319
+ "\u001b[1;31mImportError\u001b[0m: cannot import name 'dnn_logit_fn_builder' from partially initialized module 'tensorflow_estimator.python.estimator.canned.dnn' (most likely due to a circular import) (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\tensorflow_estimator\\python\\estimator\\canned\\dnn.py)"
320
+ ]
321
+ }
322
+ ],
323
+ "source": [
324
+ "import urllib.request\n",
325
+ "import fitz\n",
326
+ "import re\n",
327
+ "import numpy as np\n",
328
+ "import tensorflow_hub as hub\n",
329
+ "import openai\n",
330
+ "#import gradio as gr\n",
331
+ "import os\n",
332
+ "from sklearn.neighbors import NearestNeighbors\n",
333
+ "\n",
334
+ "def download_pdf(url, output_path):\n",
335
+ " urllib.request.urlretrieve(url, output_path)\n",
336
+ "\n",
337
+ "\n",
338
+ "def preprocess(text):\n",
339
+ " text = text.replace('\\n', ' ')\n",
340
+ " text = re.sub('\\s+', ' ', text)\n",
341
+ " return text\n",
342
+ "\n",
343
+ "\n",
344
+ "def pdf_to_text(path, start_page=1, end_page=None):\n",
345
+ " doc = fitz.open(path)\n",
346
+ " total_pages = doc.page_count\n",
347
+ "\n",
348
+ " if end_page is None:\n",
349
+ " end_page = total_pages\n",
350
+ "\n",
351
+ " text_list = []\n",
352
+ "\n",
353
+ " for i in range(start_page-1, end_page):\n",
354
+ " text = doc.load_page(i).get_text(\"text\")\n",
355
+ " text = preprocess(text)\n",
356
+ " text_list.append(text)\n",
357
+ "\n",
358
+ " doc.close()\n",
359
+ " return text_list\n",
360
+ "\n",
361
+ "\n",
362
+ "def text_to_chunks(texts, word_length=150, start_page=1):\n",
363
+ " text_toks = [t.split(' ') for t in texts]\n",
364
+ " page_nums = []\n",
365
+ " chunks = []\n",
366
+ " \n",
367
+ " for idx, words in enumerate(text_toks):\n",
368
+ " for i in range(0, len(words), word_length):\n",
369
+ " chunk = words[i:i+word_length]\n",
370
+ " if (i+word_length) > len(words) and (len(chunk) < word_length) and (\n",
371
+ " len(text_toks) != (idx+1)):\n",
372
+ " text_toks[idx+1] = chunk + text_toks[idx+1]\n",
373
+ " continue\n",
374
+ " chunk = ' '.join(chunk).strip()\n",
375
+ " chunk = f'[Page no. {idx+start_page}]' + ' ' + '\"' + chunk + '\"'\n",
376
+ " chunks.append(chunk)\n",
377
+ " return chunks\n",
378
+ "\n",
379
+ "class SemanticSearch:\n",
380
+ " \n",
381
+ " def __init__(self):\n",
382
+ " self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')\n",
383
+ " self.fitted = False\n",
384
+ " \n",
385
+ " \n",
386
+ " def fit(self, data, batch=1000, n_neighbors=5):\n",
387
+ " self.data = data\n",
388
+ " self.embeddings = self.get_text_embedding(data, batch=batch)\n",
389
+ " n_neighbors = min(n_neighbors, len(self.embeddings))\n",
390
+ " self.nn = NearestNeighbors(n_neighbors=n_neighbors)\n",
391
+ " self.nn.fit(self.embeddings)\n",
392
+ " self.fitted = True\n",
393
+ " \n",
394
+ " \n",
395
+ " def __call__(self, text, return_data=True):\n",
396
+ " inp_emb = self.use([text])\n",
397
+ " neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]\n",
398
+ " \n",
399
+ " if return_data:\n",
400
+ " return [self.data[i] for i in neighbors]\n",
401
+ " else:\n",
402
+ " return neighbors\n",
403
+ " \n",
404
+ " \n",
405
+ " def get_text_embedding(self, texts, batch=1000):\n",
406
+ " embeddings = []\n",
407
+ " for i in range(0, len(texts), batch):\n",
408
+ " text_batch = texts[i:(i+batch)]\n",
409
+ " emb_batch = self.use(text_batch)\n",
410
+ " embeddings.append(emb_batch)\n",
411
+ " embeddings = np.vstack(embeddings)\n",
412
+ " return embeddings\n",
413
+ "\n",
414
+ "\n",
415
+ "\n",
416
+ "def load_recommender(path, start_page=1):\n",
417
+ " global recommender\n",
418
+ " texts = pdf_to_text(path, start_page=start_page)\n",
419
+ " chunks = text_to_chunks(texts, start_page=start_page)\n",
420
+ " recommender.fit(chunks)\n",
421
+ " return 'Corpus Loaded.'\n",
422
+ "\n",
423
+ "def generate_text(openAI_key,prompt, engine=\"text-davinci-003\"):\n",
424
+ " openai.api_key = openAI_key\n",
425
+ " completions = openai.Completion.create(\n",
426
+ " engine=engine,\n",
427
+ " prompt=prompt,\n",
428
+ " max_tokens=512,\n",
429
+ " n=1,\n",
430
+ " stop=None,\n",
431
+ " temperature=0.7,\n",
432
+ " )\n",
433
+ " message = completions.choices[0].text\n",
434
+ " return message\n",
435
+ "\n",
436
+ "def generate_answer(question,openAI_key):\n",
437
+ " topn_chunks = recommender(question)\n",
438
+ " prompt = \"\"\n",
439
+ " prompt += 'search results:\\n\\n'\n",
440
+ " for c in topn_chunks:\n",
441
+ " prompt += c + '\\n\\n'\n",
442
+ " \n",
443
+ " prompt += \"Instructions: Compose a comprehensive reply to the query using the search results given. \"\\\n",
444
+ " \"Cite each reference using [ Page Number] notation (every result has this number at the beginning). \"\\\n",
445
+ " \"Citation should be done at the end of each sentence. If the search results mention multiple subjects \"\\\n",
446
+ " \"with the same name, create separate answers for each. Only include information found in the results and \"\\\n",
447
+ " \"don't add any additional information. Make sure the answer is correct and don't output false content. \"\\\n",
448
+ " \"If the text does not relate to the query, simply state 'Text Not Found in PDF'. Ignore outlier \"\\\n",
449
+ " \"search results which has nothing to do with the question. Only answer what is asked. The \"\\\n",
450
+ " \"answer should be short and concise. Answer step-by-step. \\n\\nQuery: {question}\\nAnswer: \"\n",
451
+ " \n",
452
+ " prompt += f\"Query: {question}\\nAnswer:\"\n",
453
+ " answer = generate_text(openAI_key, prompt,\"text-davinci-003\")\n",
454
+ " return answer\n",
455
+ "\n",
456
+ "\n",
457
+ "def question_answer(url, file, question,openAI_key):\n",
458
+ " if openAI_key.strip()=='':\n",
459
+ " return '[ERROR]: Please enter you Open AI Key. Get your key here : https://platform.openai.com/account/api-keys'\n",
460
+ " if url.strip() == '' and file == None:\n",
461
+ " return '[ERROR]: Both URL and PDF is empty. Provide atleast one.'\n",
462
+ " \n",
463
+ " if url.strip() != '' and file != None:\n",
464
+ " return '[ERROR]: Both URL and PDF is provided. Please provide only one (eiter URL or PDF).'\n",
465
+ "\n",
466
+ " if url.strip() != '':\n",
467
+ " glob_url = url\n",
468
+ " download_pdf(glob_url, 'corpus.pdf')\n",
469
+ " load_recommender('corpus.pdf')\n",
470
+ "\n",
471
+ " else:\n",
472
+ " old_file_name = file.name\n",
473
+ " file_name = file.name\n",
474
+ " file_name = file_name[:-12] + file_name[-4:]\n",
475
+ " os.rename(old_file_name, file_name)\n",
476
+ " load_recommender(file_name)\n",
477
+ "\n",
478
+ " if question.strip() == '':\n",
479
+ " return '[ERROR]: Question field is empty'\n",
480
+ "\n",
481
+ " return generate_answer(question,openAI_key)\n",
482
+ "\n"
483
+ ]
484
+ },
485
+ {
486
+ "cell_type": "code",
487
+ "execution_count": 3,
488
+ "id": "03db969c",
489
+ "metadata": {},
490
+ "outputs": [
491
+ {
492
+ "name": "stdout",
493
+ "output_type": "stream",
494
+ "text": [
495
+ "Collecting numpy==1.19.*\n",
496
+ " Using cached numpy-1.19.5-cp39-cp39-win_amd64.whl (13.3 MB)\n",
497
+ "Installing collected packages: numpy\n",
498
+ " Attempting uninstall: numpy"
499
+ ]
500
+ },
501
+ {
502
+ "name": "stderr",
503
+ "output_type": "stream",
504
+ "text": [
505
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
506
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
507
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
508
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
509
+ "ERROR: Could not install packages due to an OSError: [WinError 5] Access is denied: 'C:\\\\Users\\\\harsh\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python39\\\\Lib\\\\site-packages\\\\numpy\\\\~libs\\\\libopenblas.FB5AE2TYXYH2IJRDKGDGQ3XBKLKTF43H.gfortran-win_amd64.dll'\n",
510
+ "Consider using the `--user` option or check the permissions.\n",
511
+ "\n",
512
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
513
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
514
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
515
+ "WARNING: There was an error checking the latest version of pip.\n"
516
+ ]
517
+ },
518
+ {
519
+ "name": "stdout",
520
+ "output_type": "stream",
521
+ "text": [
522
+ "\n",
523
+ " Found existing installation: numpy 1.23.5\n",
524
+ " Uninstalling numpy-1.23.5:\n",
525
+ " Successfully uninstalled numpy-1.23.5\n",
526
+ "Requirement already satisfied: tensorflow in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (2.12.0)"
527
+ ]
528
+ },
529
+ {
530
+ "name": "stderr",
531
+ "output_type": "stream",
532
+ "text": [
533
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
534
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
535
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
536
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
537
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
538
+ " WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
539
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
540
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
541
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
542
+ "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
543
+ "openai-whisper 20230314 requires tiktoken==0.3.1, but you have tiktoken 0.3.3 which is incompatible."
544
+ ]
545
+ },
546
+ {
547
+ "name": "stdout",
548
+ "output_type": "stream",
549
+ "text": [
550
+ "\n",
551
+ "Requirement already satisfied: tensorflow-intel==2.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow) (2.12.0)\n",
552
+ "Collecting keras<2.13,>=2.12.0\n",
553
+ " Using cached keras-2.12.0-py2.py3-none-any.whl (1.7 MB)\n",
554
+ "Requirement already satisfied: h5py>=2.9.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.1.0)\n",
555
+ "Collecting tensorflow-estimator<2.13,>=2.12.0\n",
556
+ " Using cached tensorflow_estimator-2.12.0-py2.py3-none-any.whl (440 kB)\n",
557
+ "Requirement already satisfied: typing-extensions>=3.6.6 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (4.6.2)\n",
558
+ "Collecting numpy<1.24,>=1.22\n",
559
+ " Using cached numpy-1.23.5-cp39-cp39-win_amd64.whl (14.7 MB)\n",
560
+ "Requirement already satisfied: google-pasta>=0.1.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.2.0)\n",
561
+ "Requirement already satisfied: packaging in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (23.0)\n",
562
+ "Requirement already satisfied: astunparse>=1.6.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.6.3)\n",
563
+ "Requirement already satisfied: opt-einsum>=2.3.2 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.3.0)\n",
564
+ "Requirement already satisfied: libclang>=13.0.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (16.0.0)\n",
565
+ "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (3.20.3)\n",
566
+ "Requirement already satisfied: six>=1.12.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.15.0)\n",
567
+ "Requirement already satisfied: wrapt<1.15,>=1.11.0 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.12.1)\n",
568
+ "Requirement already satisfied: tensorboard<2.13,>=2.12 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (2.12.3)\n",
569
+ "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.31.0)\n",
570
+ "Requirement already satisfied: gast<=0.4.0,>=0.2.1 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.4.0)\n",
571
+ "Requirement already satisfied: jax>=0.3.15 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (0.4.10)\n",
572
+ "Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from tensorflow-intel==2.12.0->tensorflow) (1.54.2)\n"
573
+ ]
574
+ },
575
+ {
576
+ "name": "stderr",
577
+ "output_type": "stream",
578
+ "text": [
579
+ "\n",
580
+ "argilla 1.6.0 requires wrapt<1.15,>=1.13, but you have wrapt 1.12.1 which is incompatible.\n",
581
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
582
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
583
+ "WARNING: Ignoring invalid distribution -rotobuf (c:\\users\\harsh\\appdata\\local\\programs\\python\\python39\\lib\\site-packages)\n",
584
+ "WARNING: There was an error checking the latest version of pip.\n"
585
+ ]
586
+ }
587
+ ],
588
+ "source": [
589
+ "!pip install --upgrade numpy==1.19.*\n",
590
+ "!pip install --upgrade tensorflow"
591
+ ]
592
+ },
593
+ {
594
+ "cell_type": "code",
595
+ "execution_count": 4,
596
+ "id": "7bdf8293",
597
+ "metadata": {},
598
+ "outputs": [],
599
+ "source": [
600
+ "class SemanticSearch:\n",
601
+ " \n",
602
+ " def __init__(self):\n",
603
+ " self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')\n",
604
+ " self.fitted = False\n",
605
+ " \n",
606
+ " \n",
607
+ " def fit(self, data, batch=1000, n_neighbors=5):\n",
608
+ " self.data = data\n",
609
+ " self.embeddings = self.get_text_embedding(data, batch=batch)\n",
610
+ " n_neighbors = min(n_neighbors, len(self.embeddings))\n",
611
+ " self.nn = NearestNeighbors(n_neighbors=n_neighbors)\n",
612
+ " self.nn.fit(self.embeddings)\n",
613
+ " self.fitted = True\n",
614
+ " \n",
615
+ " \n",
616
+ " def __call__(self, text, return_data=True):\n",
617
+ " inp_emb = self.use([text])\n",
618
+ " neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]\n",
619
+ " \n",
620
+ " if return_data:\n",
621
+ " return [self.data[i] for i in neighbors]\n",
622
+ " else:\n",
623
+ " return neighbors\n",
624
+ " \n",
625
+ " \n",
626
+ " def get_text_embedding(self, texts, batch=1000):\n",
627
+ " embeddings = []\n",
628
+ " for i in range(0, len(texts), batch):\n",
629
+ " text_batch = texts[i:(i+batch)]\n",
630
+ " emb_batch = self.use(text_batch)\n",
631
+ " embeddings.append(emb_batch)\n",
632
+ " embeddings = np.vstack(embeddings)\n",
633
+ " return embeddings\n",
634
+ "\n",
635
+ "\n"
636
+ ]
637
+ },
638
+ {
639
+ "cell_type": "code",
640
+ "execution_count": 15,
641
+ "id": "2f29da6b",
642
+ "metadata": {},
643
+ "outputs": [],
644
+ "source": [
645
+ "def download_pdf(url, output_path):\n",
646
+ " urllib.request.urlretrieve(url, output_path)\n",
647
+ "\n",
648
+ "\n",
649
+ "def preprocess(text):\n",
650
+ " text = text.replace('\\n', ' ')\n",
651
+    "    text = re.sub(r'\\s+', ' ', text)\n",
652
+ " return text\n",
653
+ "\n",
654
+ "\n",
655
+ "def pdf_to_text(path, start_page=1, end_page=None):\n",
656
+ " doc = fitz.open(path)\n",
657
+ " total_pages = doc.page_count\n",
658
+ "\n",
659
+ " if end_page is None:\n",
660
+ " end_page = total_pages\n",
661
+ "\n",
662
+ " text_list = []\n",
663
+ "\n",
664
+ " for i in range(start_page-1, end_page):\n",
665
+ " text = doc.load_page(i).get_text(\"text\")\n",
666
+ " text = preprocess(text)\n",
667
+ " text_list.append(text)\n",
668
+ "\n",
669
+ " doc.close()\n",
670
+ " return text_list\n",
671
+ "\n",
672
+ "\n",
673
+ "def text_to_chunks(texts, word_length=150, start_page=1):\n",
674
+ " text_toks = [t.split(' ') for t in texts]\n",
675
+ " page_nums = []\n",
676
+ " chunks = []\n",
677
+ " \n",
678
+ " for idx, words in enumerate(text_toks):\n",
679
+ " for i in range(0, len(words), word_length):\n",
680
+ " chunk = words[i:i+word_length]\n",
681
+ " if (i+word_length) > len(words) and (len(chunk) < word_length) and (\n",
682
+ " len(text_toks) != (idx+1)):\n",
683
+ " text_toks[idx+1] = chunk + text_toks[idx+1]\n",
684
+ " continue\n",
685
+ " chunk = ' '.join(chunk).strip()\n",
686
+ " chunk = f'[Page no. {idx+start_page}]' + ' ' + '\"' + chunk + '\"'\n",
687
+ " chunks.append(chunk)\n",
688
+ " return chunks\n",
689
+ "\n",
690
+ "\n",
691
+ "def load_recommender(path, start_page=1):\n",
692
+ " global recommender\n",
693
+ " texts = pdf_to_text(path, start_page=start_page)\n",
694
+ " chunks = text_to_chunks(texts, start_page=start_page)\n",
695
+ " recommender.fit(chunks)\n",
696
+ " return 'Corpus Loaded.'\n",
697
+ "\n",
698
+ "def generate_text(openAI_key,prompt, engine=\"text-davinci-003\"):\n",
699
+ " openai.api_key = openAI_key\n",
700
+ " completions = openai.Completion.create(\n",
701
+ " engine=engine,\n",
702
+ " prompt=prompt,\n",
703
+ " max_tokens=512,\n",
704
+ " n=1,\n",
705
+ " stop=None,\n",
706
+ " temperature=0.7,\n",
707
+ " )\n",
708
+ " message = completions.choices[0].text\n",
709
+ " return message\n",
710
+ "\n",
711
+ "def generate_answer(question,openAI_key):\n",
712
+ " topn_chunks = recommender(question)\n",
713
+ " prompt = \"\"\n",
714
+ " prompt += 'search results:\\n\\n'\n",
715
+ " for c in topn_chunks:\n",
716
+ " prompt += c + '\\n\\n'\n",
717
+ " \n",
718
+ " prompt += \"Instructions: Compose a comprehensive reply to the query using the search results given. \"\\\n",
719
+ " \"Cite each reference using [ Page Number] notation (every result has this number at the beginning). \"\\\n",
720
+ " \"Citation should be done at the end of each sentence. If the search results mention multiple subjects \"\\\n",
721
+ " \"with the same name, create separate answers for each. Only include information found in the results and \"\\\n",
722
+ " \"don't add any additional information. Make sure the answer is correct and don't output false content. \"\\\n",
723
+ " \"If the text does not relate to the query, simply state 'Text Not Found in PDF'. Ignore outlier \"\\\n",
724
+ " \"search results which has nothing to do with the question. Only answer what is asked. The \"\\\n",
725
+    "             \"answer should be short and concise. Answer step-by-step.\"\n",
726
+ " \n",
727
+ " prompt += f\"Query: {question}\\nAnswer:\"\n",
728
+ " answer = generate_text(openAI_key, prompt,\"text-davinci-003\")\n",
729
+ " return answer\n",
730
+ "\n",
731
+ "\n",
732
+ "def question_answer(url, file, question,openAI_key):\n",
733
+ " \n",
734
+ " if openAI_key.strip()=='':\n",
735
+    "        return '[ERROR]: Please enter your OpenAI key. Get your key here: https://platform.openai.com/account/api-keys'\n",
736
+    "    if url.strip() == '' and file is None:\n",
737
+    "        return '[ERROR]: Both URL and PDF are empty. Provide at least one.'\n",
738
+ " \n",
739
+    "    if url.strip() != '' and file is not None:\n",
740
+    "        return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).'\n",
741
+ "\n",
742
+ " if url.strip() != '':\n",
743
+ " glob_url = url\n",
744
+ " download_pdf(glob_url, 'corpus.pdf')\n",
745
+ " load_recommender('corpus.pdf')\n",
746
+ "\n",
747
+ " else:\n",
748
+    "        old_file_name = file.name\n",
749
+    "        file_name = file.name\n",
750
+ " file_name = file_name[:-12] + file_name[-4:]\n",
751
+ " os.rename(old_file_name, file_name)\n",
752
+ " load_recommender(file_name)\n",
753
+ "\n",
754
+ " if question.strip() == '':\n",
755
+ " return '[ERROR]: Question field is empty'\n",
756
+ "\n",
757
+ " return generate_answer(question,openAI_key)\n",
758
+ "\n",
759
+ "\n",
760
+ "recommender = SemanticSearch()\n",
761
+ "\n"
762
+ ]
763
+ },
764
+ {
765
+ "cell_type": "code",
766
+ "execution_count": 16,
767
+ "id": "01278c18",
768
+ "metadata": {},
769
+ "outputs": [
770
+ {
771
+ "data": {
772
+ "text/plain": [
773
+ "<__main__.SemanticSearch at 0x226a75673a0>"
774
+ ]
775
+ },
776
+ "execution_count": 16,
777
+ "metadata": {},
778
+ "output_type": "execute_result"
779
+ }
780
+ ],
781
+ "source": [
782
+ "recommender"
783
+ ]
784
+ },
785
+ {
786
+ "cell_type": "markdown",
787
+ "id": "88515cae",
788
+ "metadata": {},
789
+ "source": [
790
+ "pip install --upgrade typing-extensions"
791
+ ]
792
+ },
793
+ {
794
+ "cell_type": "code",
795
+ "execution_count": 12,
796
+ "id": "7adbc726",
797
+ "metadata": {},
798
+ "outputs": [
799
+ {
800
+ "ename": "PermissionError",
801
+ "evalue": "[WinError 32] The process cannot access the file because it is being used by another process: 'userguide.pdf' -> 'u.pdf'",
802
+ "output_type": "error",
803
+ "traceback": [
804
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
805
+ "\u001b[1;31mPermissionError\u001b[0m Traceback (most recent call last)",
806
+ "\u001b[1;32m<ipython-input-12-f05a0f8eb9e9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mfile\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile_loc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[0mquestion\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'How to assign process to workflow'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 7\u001b[1;33m \u001b[0mans\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mquestion_answer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0murl\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfile_loc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mquestion\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mopenAI_key\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
807
+ "\u001b[1;32m<ipython-input-11-038ae46770ef>\u001b[0m in \u001b[0;36mquestion_answer\u001b[1;34m(url, file_loc, question, openAI_key)\u001b[0m\n\u001b[0;32m 106\u001b[0m \u001b[0mfile_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfile_name\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m12\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mfile_name\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 107\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 108\u001b[1;33m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrename\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mold_file_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfile_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 109\u001b[0m \u001b[0mload_recommender\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
808
+ "\u001b[1;31mPermissionError\u001b[0m: [WinError 32] The process cannot access the file because it is being used by another process: 'userguide.pdf' -> 'u.pdf'"
809
+ ]
810
+ }
811
+ ],
812
+ "source": [
813
+    "openAI_key='YOUR_OPENAI_API_KEY'\n",
814
+ "url=''\n",
815
+ "file_loc='C:\\\\Users\\\\harsh\\\\Downloads\\\\Jupyter notebooks\\\\inputFiles\\\\AE_userguide.pdf'\n",
816
+ "file_loc='userguide.pdf'\n",
817
+ "file = open(file_loc)\n",
818
+ "question='How to assign process to workflow'\n",
819
+ "ans=question_answer(url, file, question,openAI_key)"
820
+ ]
821
+ },
822
+ {
823
+ "cell_type": "code",
824
+ "execution_count": null,
825
+ "id": "3c1fe98d",
826
+ "metadata": {},
827
+ "outputs": [],
828
+ "source": [
829
+ "print(ans)"
830
+ ]
831
+ },
832
+ {
833
+ "cell_type": "code",
834
+ "execution_count": null,
835
+ "id": "39eee11f",
836
+ "metadata": {},
837
+ "outputs": [],
838
+ "source": []
839
+ },
840
+ {
841
+ "cell_type": "code",
842
+ "execution_count": 13,
843
+ "id": "6f621d05",
844
+ "metadata": {},
845
+ "outputs": [],
846
+ "source": [
847
+ "import gradio as gr"
848
+ ]
849
+ },
850
+ {
851
+ "cell_type": "code",
852
+ "execution_count": 21,
853
+ "id": "d3c2f47b",
854
+ "metadata": {},
855
+ "outputs": [],
856
+ "source": [
857
+ "title = ' AEGPT'\n",
858
+    "description = \"\"\" AE GPT allows you to chat with a PDF file using Universal Sentence Encoder and OpenAI. The response even cites the page number in square brackets ([]) where the information is located. Upload any document and it will give you correct answers about it.\"\"\"\n"
859
+ ]
860
+ },
861
+ {
862
+ "cell_type": "code",
863
+ "execution_count": 22,
864
+ "id": "e683bb8d",
865
+ "metadata": {},
866
+ "outputs": [
867
+ {
868
+ "name": "stdout",
869
+ "output_type": "stream",
870
+ "text": [
871
+ "Running on local URL: http://127.0.0.1:7865\n",
872
+ "Running on public URL: https://bfb35e477097c49460.gradio.live\n",
873
+ "\n",
874
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n"
875
+ ]
876
+ },
877
+ {
878
+ "data": {
879
+ "text/html": [
880
+ "<div><iframe src=\"https://bfb35e477097c49460.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
881
+ ],
882
+ "text/plain": [
883
+ "<IPython.core.display.HTML object>"
884
+ ]
885
+ },
886
+ "metadata": {},
887
+ "output_type": "display_data"
888
+ },
889
+ {
890
+ "data": {
891
+ "text/plain": []
892
+ },
893
+ "execution_count": 22,
894
+ "metadata": {},
895
+ "output_type": "execute_result"
896
+ }
897
+ ],
898
+ "source": [
899
+ "with gr.Blocks() as demo:\n",
900
+ "\n",
901
+ " gr.Markdown(f'<center><h1>{title}</h1></center>')\n",
902
+ " gr.Markdown(description)\n",
903
+ "\n",
904
+ " with gr.Row():\n",
905
+ " \n",
906
+ " with gr.Group():\n",
907
+ " gr.Markdown(f'<p style=\"text-align:center\">Get your Open AI API key <a href=\"https://platform.openai.com/account/api-keys\">here</a></p>')\n",
908
+ " openAI_key=gr.Textbox(label='Enter your OpenAI API key here')\n",
909
+ " url = gr.Textbox(label='Enter PDF URL here')\n",
910
+ " gr.Markdown(\"<center><h4>OR<h4></center>\")\n",
911
+ " file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])\n",
912
+ " question = gr.Textbox(label='Enter your question here')\n",
913
+ " btn = gr.Button(value='Get Answer')\n",
914
+ " btn.style(full_width=True)\n",
915
+ "\n",
916
+ " with gr.Group():\n",
917
+ " answer = gr.Textbox(label='The answer to your question is :')\n",
918
+ "\n",
919
+ " btn.click(question_answer, inputs=[url, file, question,openAI_key], outputs=[answer])\n",
920
+ "#openai.api_key = os.getenv('Your_Key_Here') \n",
921
+ "demo.launch(share=True)"
922
+ ]
923
+ },
924
+ {
925
+ "cell_type": "code",
926
+ "execution_count": null,
927
+ "id": "037c4ea6",
928
+ "metadata": {},
929
+ "outputs": [],
930
+ "source": []
931
+ }
932
+ ],
933
+ "metadata": {
934
+ "kernelspec": {
935
+ "display_name": "Python 3",
936
+ "language": "python",
937
+ "name": "python3"
938
+ },
939
+ "language_info": {
940
+ "codemirror_mode": {
941
+ "name": "ipython",
942
+ "version": 3
943
+ },
944
+ "file_extension": ".py",
945
+ "mimetype": "text/x-python",
946
+ "name": "python",
947
+ "nbconvert_exporter": "python",
948
+ "pygments_lexer": "ipython3",
949
+ "version": "3.9.5"
950
+ }
951
+ },
952
+ "nbformat": 4,
953
+ "nbformat_minor": 5
954
+ }