sharjeel1477 committed
Commit 6db1d82
1 Parent(s): 6b226dd

Update ask.py
ask.py CHANGED
@@ -91,14 +91,14 @@ def askQuestion(brain, question, prompt, temperature, maxTokens, combined):
 
     query = question
 
-
-
-    new = LLMPredictor(llm=OpenAI(
-        temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
+    new = LLMPredictor(llm=ChatOpenAI(temperature=temperature,
+                                      model_name="gpt-3.5-turbo-16k", max_tokens=maxTokens))
+    # new = LLMPredictor(llm=OpenAI(
+    #     temperature=temperature, model_name="text-davinci-003", max_tokens=maxTokens))
 
     service_context = ServiceContext.from_defaults(llm_predictor=new)
     query_engine = index.as_query_engine(
-        similarity_top_k=
+        similarity_top_k=7, response_mode="compact", service_context=service_context,text_qa_template=QA_PROMPT)
 
     query = question
     if combined == False:
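The substantive change in this hunk is the model swap: the completion-style text-davinci-003 predictor is commented out in favor of the 16k-context gpt-3.5-turbo chat model, and the query engine is pinned to seven retrieved chunks with a compact response mode. A minimal self-contained sketch of the new configuration, assuming the legacy llama_index (~0.6) and langchain APIs this file appears to use; the build_query_engine wrapper and the prompt template are illustrative stand-ins, since ask.py defines its own QA_PROMPT and index elsewhere:

from langchain.chat_models import ChatOpenAI
from llama_index import LLMPredictor, QuestionAnswerPrompt, ServiceContext

# Hypothetical stand-in template; ask.py defines its own QA_PROMPT.
QA_PROMPT = QuestionAnswerPrompt(
    "Context information:\n{context_str}\n\nAnswer the question: {query_str}\n")

def build_query_engine(index, temperature, maxTokens):
    # The 16k chat model replaces the 4k text-davinci-003 completion model,
    # which is presumably what makes the larger retrieval budget viable.
    new = LLMPredictor(llm=ChatOpenAI(temperature=temperature,
                                      model_name="gpt-3.5-turbo-16k",
                                      max_tokens=maxTokens))
    service_context = ServiceContext.from_defaults(llm_predictor=new)
    # similarity_top_k=7 retrieves seven chunks per query; "compact" packs
    # as many retrieved chunks as fit into each LLM call.
    return index.as_query_engine(
        similarity_top_k=7,
        response_mode="compact",
        service_context=service_context,
        text_qa_template=QA_PROMPT)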
@@ -108,7 +108,7 @@ def askQuestion(brain, question, prompt, temperature, maxTokens, combined):
         response.response = defaultResponse
         res = {"data": [response.response, {
             "memory": [question, response.response]}]}
-
+
         return json.dumps(res)
     else:
         response = query_engine.query(finalQuestion)
@@ -121,7 +121,8 @@ def askQuestion(brain, question, prompt, temperature, maxTokens, combined):
 def followUp(brainName, question, memory, temperature, maxTokens):
     if (memory == False):
         return "Please Initiate the Chat first.."
-
+    if(int(maxTokens)>2000):
+        maxTokens=2000
     newMemory = ConversationBufferMemory(memory_key="chat_history")
     for index in range(len(memory)):
         if index % 2 == 0:
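The followUp change caps the caller-supplied maxTokens at 2000 before any model call, presumably to keep the completion budget inside the context window. For illustration only, a near-equivalent one-liner (unlike the committed pair, it also coerces maxTokens to int on the non-clamped path):

# Clamp the requested completion budget to 2000 tokens.
maxTokens = min(int(maxTokens), 2000)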