FabioSantos committed • Commit e9400d5
1 Parent(s): e1bc29c
Upload rag_retriver.py
rag_retriver.py CHANGED (+37 -30)
@@ -1,49 +1,56 @@
 
-import pinecone
-from openai import OpenAI
 from dotenv import dotenv_values
+import google.generativeai as genai
+import pinecone
+import cohere
+co = cohere.Client('qCFXJU5hVIvVrQOBQfQr6lvK8p8rLSYHfVIxxZjP')
 
 #Loading Credentials
 env_name = "credentials.env"
 config = dotenv_values(env_name)
-client = OpenAI(api_key= config["openai_api"])
-
 
-#Connection
-index_name = config["index_name"]
 # initialize connection to pinecone (get API key at app.pinecone.io)
-
-
-
-
-
+
+index_name = 'rag-demo'
+pinecone.init(
+    api_key='d4f03782-3537-43b2-9050-ed2c00be71a2',
+    environment='gcp-starter'
+)
+index = pinecone.Index('rag-demo')
 
 #Vector Search
 def Vector_search(query):
     Rag_data = ""
-    xq =
-
+    xq = co.embed(
+        texts=['transportation law'],
+        model='embed-english-v3.0',
+        input_type='classification'
+    )
+    res = index.query([xq.embeddings[0]], top_k=2, include_metadata=True)
     for match in res['matches']:
         if match['score'] < 0.80:
-            continue
+            continue
         Rag_data += match['metadata']['text']
-    return Rag_data
+    return Rag_data
+
 
 #GPT Completion
-def GPT_completion_with_vector_search(prompt
-
+def GPT_completion_with_vector_search(prompt):
+    genai.configure(api_key="AIzaSyAt0u96OqVw5-rAdM3pmL1rjT8H_jYAnJ8")
+    for m in genai.list_models():
+        if 'generateContent' in m.supported_generation_methods:
+            print(m.name)
+
+    model = genai.GenerativeModel('gemini-pro')
+    rag = Vector_search(prompt)
+
+    DEFAULT_SYSTEM_PROMPT = '''You are a helpful, respectful and honest INTP-T AI Assistant named Gathnex AI. You are talking to a human User.
 Always answer as helpfully and logically as possible, while being safe. Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
 If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
-You
-
-
-
-
-
-
-            {f"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
-            {f"role": "user", "content": rag +", Prompt: "+ prompt},
-        ]
-    )
-
-    return response.choices[0].message.content
+You can speak fluently in English
+'''
+
+    response = model.generate_content(DEFAULT_SYSTEM_PROMPT+rag+prompt)
+
+
+    return response.text
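
The new version still loads credentials.env into config but then passes literal keys to cohere.Client, pinecone.init, and genai.configure. Below is a minimal sketch, using the same libraries and calls, of reading those keys from the already-loaded config instead; the entry names cohere_api, pinecone_api, pinecone_env, and google_api are assumptions (only index_name appears as a config key in the previous version of the file).

# Sketch only: same clients as rag_retriver.py, but keyed from credentials.env.
# The config entry names below are assumed, not confirmed by the commit.
from dotenv import dotenv_values
import cohere
import pinecone
import google.generativeai as genai

config = dotenv_values("credentials.env")

co = cohere.Client(config["cohere_api"])          # Cohere embedding client
pinecone.init(
    api_key=config["pinecone_api"],               # pinecone-client 2.x style init, as in the commit
    environment=config["pinecone_env"],
)
index = pinecone.Index(config.get("index_name", "rag-demo"))
genai.configure(api_key=config["google_api"])     # Gemini client used for the completion step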
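
A hedged usage sketch, assuming rag_retriver.py is importable from the Space and its credentials are valid; the question string is illustrative only.

# Sketch only: exercising the committed helpers end to end.
from rag_retriver import Vector_search, GPT_completion_with_vector_search

question = "What does transportation law say about carrier liability?"  # hypothetical query

context = Vector_search(question)   # note: the committed Vector_search embeds the literal
print(context)                      # 'transportation law' string rather than its argument
print(GPT_completion_with_vector_search(question))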