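# GenAIx: a Gradio chat app that answers questions over the 'langchaindocs'
# Pinecone index. Each query is embedded with OpenAI, the closest chunks are
# retrieved from Pinecone, and gpt-3.5-turbo answers using that context.
# Requires the OPENAI_SECRET_TOKEN and PINECONE_SECRET_TOKEN environment variables.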
import os

import gradio as gr
from openai import OpenAI
from pinecone import Pinecone

# API keys are read from the environment
OPENAI_SECRET_TOKEN = os.getenv("OPENAI_SECRET_TOKEN")
PINECONE_SECRET_TOKEN = os.getenv("PINECONE_SECRET_TOKEN")

# connect to the Pinecone index holding the embedded docs
pc = Pinecone(api_key=PINECONE_SECRET_TOKEN)
index_name = 'langchaindocs'
index = pc.Index(index_name)

# OpenAI client (get the API key from platform.openai.com)
client = OpenAI(api_key=OPENAI_SECRET_TOKEN)
embed_model = "text-embedding-ada-002"

def generate_context(text):
    # embed the question, then pull the 20 closest chunks from Pinecone
    res = client.embeddings.create(
        input=[text],
        model=embed_model
    ).data[0].embedding
    result = index.query(vector=res, top_k=20, include_metadata=True)
    contexts = [x['metadata']['text'] for x in result['matches']]
    return contexts

def invoke_openai(prompt, history):
    sys_prompt = "You are a helpful assistant that always answers questions."
    new_messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "system", "content": "Use only the context to answer the question"},
    ]
    # replay earlier turns so the model keeps the conversation state
    for user, assistant in history:
        new_messages.append({"role": "user", "content": user})
        new_messages.append({"role": "assistant", "content": assistant})
    new_messages.append({"role": "user", "content": prompt})
    # query gpt-3.5-turbo with the context-augmented prompt
    res = client.chat.completions.create(
        model='gpt-3.5-turbo',
        messages=new_messages,
        temperature=0
    )
    return res.choices[0].message.content

def build_prompt(message, history):
    # retrieve relevant context and prepend it to the user's question
    context = generate_context(message)
    prompt = f'Context - {context}\nBased on the above context, answer this question - {message}'
    print(prompt)
    return invoke_openai(prompt, history)


iface = gr.ChatInterface(
    build_prompt,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask me a question", container=False, scale=7),
    title="GenAIx",
    examples=["How conversational chat works", "Can you write a code snippet for conversation bot?"],
    theme="soft",
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)
iface.launch(share=True)