thejas-gida committed on
Commit
5e475e7
1 Parent(s): 7ae95f4

Create app1.py

Browse files
Files changed (1) hide show
  1. app1.py +102 -0
app1.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
from collections.abc import Generator
from queue import Queue, Empty
from threading import Thread
from typing import Any

# Third-party
import gradio as gr
from langchain import OpenAI
from langchain.agents import Tool
from langchain.agents import initialize_agent
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
16
+
17
class QueueCallback(BaseCallbackHandler):
    """LangChain callback that forwards streamed LLM tokens into a queue.

    A consumer (see ``stream``) drains tokens off ``q`` from another
    thread, enabling token-by-token streaming to the UI.
    """

    def __init__(self, q: Queue) -> None:
        # Queue shared with the consumer thread that drains tokens.
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Push each newly generated token onto the queue."""
        self.q.put(token)

    # Fixed annotations: ``any`` is the builtin function, not a type, and
    # this method returns a bool (queue-drained flag), not None.
    def on_llm_end(self, *args: Any, **kwargs: Any) -> bool:
        """Called when generation finishes; True if the queue is drained."""
        return self.q.empty()
26
+
27
# Agent prompt prefix: scopes the assistant to automobile topics and asks
# for HTML-formatted output. Passed via agent_kwargs in ``stream``.
PREFIX = '''
You are an Automobile expert AI scientist having all the knowledge about all the existing cars and bikes with their respective models and all the information around it.
If the question is not related to cars, bikes, automobiles or their related models then please let the user know that you don't have the relevant information.

Return the entire output in an HTML format.

Make sure to follow each and every instructions before giving the response.
'''
# Agent prompt suffix: template slots filled by LangChain at run time
# ({chat_history}, {input}, {agent_scratchpad} are agent template variables).
SUFFIX = '''
Begin!
Previous conversation history:
{chat_history}
Instructions: {input}
{agent_scratchpad}
'''
42
+
43
# Pre-built llama_index vector index persisted to disk; queried by the tool
# below. NOTE(review): the file must exist at this relative path — confirm
# it ships with the app.
index = GPTSimpleVectorIndex.load_from_disk('./cars_bikes(2).json')

# Single LangChain tool exposing the vector index to the agent.
# ``return_direct=True`` makes the agent return the tool's output verbatim
# instead of reasoning further over it.
tools = [
    Tool(
        name = "LlamaIndex",
        # Wrap the index query so the tool returns a plain string.
        func=lambda q: str(index.query(q)),
        description="""You are an Automobile expert equipped with all the information related to all the existing cars, bikes and all its respective brands & models, features, parameters and specifications
who is capable of perfectly answering everything related to every automobile brands in a tabular format or list.
Answer using formatted tables or lists as when required.
If the question is not related to cars, bikes, automobiles or their related models then please let the user know that you don't have the relevant information.
Please answer keeping in mind the Indian context.

Return the entire output in an HTML format.

Make sure to follow each and every instructions before giving the response.
""",
        return_direct=True),
]

# Cap on completion length for the LLM (max_tokens below).
num_outputs = 2000
# Rolling memory of the last k=5 exchanges, surfaced to the agent as
# {chat_history} in SUFFIX.
conversational_memory = ConversationBufferWindowMemory( memory_key='chat_history', k=5, return_messages=True )
# NOTE(review): langchain.OpenAI is a completion-model wrapper; "gpt-4" is a
# chat model — ChatOpenAI (imported above) is likely intended. Confirm.
llm = OpenAI(temperature=0.5, model_name="gpt-4",max_tokens=num_outputs)
64
+
65
def stream(input_text) -> Generator:
    """Run the conversational agent on *input_text* in a worker thread,
    yielding ``(token, accumulated_content)`` tuples as output arrives.

    NOTE(review): the module-level ``llm`` is built without a
    ``QueueCallback``, so true per-token streaming is not wired up; the
    final agent response arrives on the queue in one piece. To stream
    token-by-token, attach ``QueueCallback(q)`` to the LLM's callbacks.
    """
    # Bug fix: ``q`` and ``job_done`` were referenced but never defined
    # anywhere in the file, raising NameError at runtime.
    q: Queue = Queue()
    job_done = object()  # sentinel marking end of generation

    conversation = initialize_agent(
        tools,
        llm,
        agent="conversational-react-description",
        memory=conversational_memory,
        agent_kwargs={'prefix': PREFIX, 'suffix': SUFFIX},
    )

    # Worker: run the agent, publish its answer, then signal completion.
    def task():
        resp = conversation.run(input_text)
        # Bug fix: the response was previously discarded, so the generator
        # below could never yield any content.
        q.put(resp)
        q.put(job_done)

    worker = Thread(target=task)
    worker.start()

    content = ""
    # Drain the queue until the sentinel appears, yielding progress as we go.
    while True:
        try:
            next_token = q.get(True, timeout=1)
            if next_token is job_done:
                break
            content += next_token
            yield next_token, content
        except Empty:
            # Nothing produced within the timeout; keep polling.
            continue
89
+
90
+ add = "Return the output in a table format or an ordered list legible to the user.\n"
91
+ def greet(Question):
92
+ for next_token, content in stream(Question):
93
+ yield(add+Question)
94
+
95
# Gradio UI: one multi-line textbox in, HTML out. ``greet`` is a generator,
# so Gradio re-renders the HTML output on every yielded value.
demo = gr.Interface(
    fn=greet,
    inputs=gr.Textbox(lines=2, label="Question", placeholder="What do you want to know...?"),
    outputs=gr.HTML(""),
    title="Here Auto",
    description="Know everything about Cars and Bikes",
)
# Start the web server (blocking call).
demo.launch()