Spaces:
Runtime error
Runtime error
Update app.py
#1
by
burak
- opened
app.py
CHANGED
@@ -1,30 +1,84 @@
|
|
|
|
|
|
1 |
import gradio as gr
|
|
|
2 |
import openai
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
|
|
|
|
22 |
|
23 |
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# My account
import gradio as gr
import pinecone
import openai

# Name of the Pinecone index that holds the LangChain-docs embeddings.
index_name = 'gpt-4-langchain-docs'

# Output dimensionality of OpenAI's text-embedding-ada-002 model.
# BUG FIX: the original code computed this as
# len(res['data'][0]['embedding']), but `res` is never defined at module
# level, so the app crashed with a NameError on import (the Space's
# "Runtime error"). The dimension is a fixed property of the model.
EMBEDDING_DIM = 1536

# initialize connection to pinecone
# SECURITY NOTE(review): the API key is hard-coded in source; it should be
# read from an environment variable / Space secret instead.
pinecone.init(
    api_key="08dc4515-1799-4a23-81a0-b9f86975f84f",  # app.pinecone.io (console)
    environment="us-west4-gcp"  # next to API key in console
)

# check if index already exists (it shouldn't if this is first time)
if index_name not in pinecone.list_indexes():
    # if does not exist, create index
    pinecone.create_index(
        index_name,
        dimension=EMBEDDING_DIM,
        metric='dotproduct'
    )
# connect to index
index = pinecone.GRPCIndex(index_name)
def ask(OpenAI_key, query):
    """Answer *query* using context retrieved from the Pinecone index.

    Embeds the query with OpenAI's text-embedding-ada-002 model, fetches
    the top-5 most similar document chunks from the Pinecone index, and
    asks gpt-3.5-turbo to answer using only that retrieved context.

    Parameters
    ----------
    OpenAI_key : str
        The caller's OpenAI API key (platform.openai.com).
    query : str
        The user's question.

    Returns
    -------
    str
        The chat model's answer text.
    """
    openai.api_key = OpenAI_key  # platform.openai.com

    embed_model = "text-embedding-ada-002"

    # FIX: removed a dead openai.Embedding.create() call that embedded two
    # hard-coded sample sentences on every request (a wasted paid API call
    # whose result was immediately overwritten), a pointless reassignment
    # of index_name, and an unused `from IPython.display import Markdown`
    # import that would fail outside a notebook environment.

    # Embed the query so we can search the vector index with it.
    res = openai.Embedding.create(
        input=[query],
        engine=embed_model
    )

    # retrieve from Pinecone
    xq = res['data'][0]['embedding']

    # get relevant contexts (including the questions)
    res = index.query(xq, top_k=5, include_metadata=True)
    contexts = [item['metadata']['text'] for item in res['matches']]

    # Prepend the retrieved chunks to the question, separated by rules.
    augmented_query = "\n\n---\n\n".join(contexts) + "\n\n-----\n\n" + query

    primer = f"""You are Q&A bot. A highly intelligent system that answers
user questions based on the information provided by the user above
each question. If the information can not be found in the information
provided by the user you truthfully say "I don't know".
"""

    res = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": primer},
            {"role": "user", "content": augmented_query}
        ]
    )

    return res['choices'][0]['message']['content']
# Wire ask() into a simple two-textbox Gradio UI: the first box takes the
# caller's OpenAI API key, the second the question to answer.
demo = gr.Interface(
    fn=ask,
    inputs=["text", "text"],
    outputs="text",
    title='ShipsGo AI Assistant',
)
demo.launch()