File size: 1,102 Bytes
ea92d7b
 
 
129ce37
ea92d7b
 
23760ed
129ce37
 
ea92d7b
 
 
 
 
 
 
129ce37
 
ea92d7b
 
129ce37
3511f30
 
fe57737
bbbec14
ea92d7b
 
129ce37
 
ea92d7b
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
import gpt4all


# Path to the model weights.
# NOTE(review): this looks like a Hugging Face repo path ('user/repo/file'),
# not a local filesystem path — gpt4all.GPT4All expects a local model
# path/name, so confirm this actually resolves at runtime.
PATH = 'ashwinR/GPT4ALL_CS_ED/ggml-mpt-7b-instruct.bin'

#llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.15, temp=0.5, repeat_penalty=1.1)

# LangChain prompt template. Currently unused: the LLMChain below is
# commented out and generate_response() calls the gpt4all client directly.
prompt = PromptTemplate(input_variables=['question'], template="""
    Question: {question}
    
    Answer: Let's think step by step.
    """)

#llm_chain = LLMChain(prompt=prompt, llm=llm)
# Load the GPT4All model once at module import time; this is slow and may
# download weights, so app startup blocks here.
gptj = gpt4all.GPT4All(PATH)

def generate_response(question):
    """Run a single-turn chat completion and return the model's reply text.

    Wraps the raw user text in a one-message chat transcript, asks the
    module-level gpt4all client for a completion, and extracts the first
    choice's message content. Returns a canned apology when the model
    produces an empty/falsy reply.
    """
    transcript = [{"role": "user", "content" : question}]
    completion = gptj.chat_completion(transcript)
    answer = completion['choices'][0]['message']['content']
    if not answer:
        return "Sorry, Try Again"
    return answer



# Gradio UI wiring: a 5-line text input feeding generate_response, with the
# model's reply shown in a text output.
# NOTE(review): gr.inputs.* / gr.outputs.* are the legacy Gradio 2.x-style
# namespaces (deprecated and removed in Gradio 3+) — confirm the pinned
# gradio version, or migrate both to plain gr.Textbox(...).
inputs = gr.inputs.Textbox(lines=5, label='Enter your prompt here!')
outputs = gr.outputs.Textbox(label='Response')

title = 'πŸ¦œπŸ”— GPT4ALL Y\'All'
description = 'This is using the MPT model!'

# Build the app and launch the web server; launch() blocks until stopped.
gr.Interface(generate_response, inputs, outputs, title=title, description=description).launch()