# GPT4ALL_CS_ED / app.py
import gradio as gr
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
import gpt4all
# Path to the quantized MPT-7B-Instruct model file used for inference.
PATH = 'ashwinR/GPT4ALL_CS_ED/ggml-mpt-7b-instruct.bin'
# Alternative LangChain-backed LLM (kept for reference, currently unused):
#llm = GPT4All(model="./ggml-mpt-7b-instruct.bin", top_p=0.15, temp=0.5, repeat_penalty=1.1)
# Chain-of-thought style prompt for the LangChain path (not used by the gpt4all call below).
prompt = PromptTemplate(input_variables=['question'], template="""
Question: {question}
Answer: Let's think step by step.
""")
#llm_chain = LLMChain(prompt=prompt, llm=llm)

# Load the model through the gpt4all package and use its chat-completion API.
gptj = gpt4all.GPT4All(PATH)
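# Note: newer gpt4all releases expose generate() instead of chat_completion().
# A hedged, untested sketch (assumes a recent gpt4all version and a local model file):
# model = gpt4all.GPT4All('ggml-mpt-7b-instruct.bin')
# print(model.generate('What is recursion?', max_tokens=128))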
def generate_response(question):
    #response = llm_chain.run(question)  # LangChain alternative (unused)
    # Wrap the user's text in the chat-message format chat_completion expects.
    messages = [{"role": "user", "content": question}]
    response = gptj.chat_completion(messages)
    response = response['choices'][0]['message']['content']
    return response if response else "Sorry, try again."
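# Optional quick sanity check outside the Gradio UI; the prompt below is only an
# illustrative example, not part of the original app. Uncomment to run locally:
# print(generate_response("Explain a hash table in two sentences."))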
inputs = gr.Textbox(lines=5, label='Enter your prompt here!')
outputs = gr.Textbox(label='Response')
title = "🦜🔗 GPT4ALL Y'All"
description = 'This demo runs the MPT-7B-Instruct model!'
gr.Interface(generate_response, inputs, outputs, title=title, description=description).launch()