ashwinR committed on
Commit 129ce37
1 parent: f0ced2a

Update app.py

Files changed (1)
  1. app.py +11 -4
app.py CHANGED
@@ -1,10 +1,12 @@
 import gradio as gr
 from langchain import PromptTemplate, LLMChain
 from langchain.llms import GPT4All
+import gpt4all
 
-PATH = './ggml-mpt-7b-instruct.bin'
 
-llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.15, temp=0.5, repeat_penalty=1.1)
+PATH = 'ggml-mpt-7b-instruct.bin'
+
+#llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.15, temp=0.5, repeat_penalty=1.1)
 
 prompt = PromptTemplate(input_variables=['question'], template="""
 Question: {question}
@@ -12,12 +14,17 @@ prompt = PromptTemplate(input_variables=['question'], template="""
 Answer: Let's think step by step.
 """)
 
-llm_chain = LLMChain(prompt=prompt, llm=llm)
+#llm_chain = LLMChain(prompt=prompt, llm=llm)
+gptj = gpt4all.GPT4All(PATH)
 
 def generate_response(question):
-    response = llm_chain.run(question)
+    #response = llm_chain.run(question)
+    question = [{"role": "user", "content" : "What is the code to add two numbers in Python"}]
+    ret = gptj.chat_completion(question)
     return response
 
+
+
 inputs = gr.inputs.Textbox(lines=5, label='Enter your prompt here!')
 outputs = gr.outputs.Textbox(label='Response')
 
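
Note: as committed, generate_response still returns the undefined name response (the completion is assigned to ret) and replaces the incoming question with a hard-coded prompt, so the Gradio textbox input is ignored. Below is a minimal sketch of a corrected handler, assuming the pre-1.0 gpt4all Python bindings in which GPT4All.chat_completion(messages) accepts an OpenAI-style message list and returns a dict with a choices field; newer gpt4all releases expose model.generate() instead, so the call and field access may need adjusting.

import gpt4all

PATH = 'ggml-mpt-7b-instruct.bin'
gptj = gpt4all.GPT4All(PATH)

def generate_response(question):
    # Build the message list from the user's actual input rather than a hard-coded prompt.
    messages = [{"role": "user", "content": question}]
    ret = gptj.chat_completion(messages)
    # Assumes an OpenAI-style response dict; the assistant text lives under choices[0]["message"]["content"].
    return ret["choices"][0]["message"]["content"]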