from transformers import GPTNeoForCausalLM, GPT2Tokenizer

# Load GPT-Neo 1.3B and its tokenizer (GPT-Neo reuses the GPT-2 tokenizer).
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

# Few-shot prompt that frames the model as "Hassan", an entrepreneur answering questions.
prompt = """This is a discussion between a person and an entrepreneur.
person: What is your name?
entrepreneur: Hassan
person: Where are you working?
entrepreneur: It's like one of these fancy adjustable height desk
person: What will you work on?
entrepreneur: The international development hackathon
person: What are you working on?
entrepreneur: Developing an iPhone app
person: """


def my_split(s, seps):
    """Split a string on each separator in seps, returning all pieces."""
    res = [s]
    for sep in seps:
        s, res = res, []
        for seq in s:
            res += seq.split(sep)
    return res


# input = "Who are you?"
def chat_base(input):
    # Append the user's question to the few-shot prompt and generate a continuation.
    p = prompt + input
    input_ids = tokenizer(p, return_tensors="pt").input_ids
    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.7, max_length=150)
    gen_text = tokenizer.batch_decode(gen_tokens)[0]
    # Keep only the newly generated text, take the first generated line,
    # and strip the leading "entrepreneur: " speaker tag (14 characters).
    result = gen_text[len(p):]
    result = my_split(result, ["]", "\n"])[1]
    result = result[14:]
    return result


import gradio as gr


def chat(message):
    # Conversation history is kept in session state (older, pre-3.0 Gradio API).
    history = gr.get_state() or []
    print(history)
    response = chat_base(message)
    history.append((message, response))
    gr.set_state(history)
    # Render the history as simple HTML chat bubbles.
    html = "<div class='chatbox'>"
    for user_msg, resp_msg in history:
        html += f"<div class='user_msg'>{user_msg}</div>"
        html += f"<div class='resp_msg'>{resp_msg}</div>"
    html += "</div>"
    return html


iface = gr.Interface(
    chat,
    gr.inputs.Textbox(label="Ask Hassan a Question"),
    "html",
    css="""
    .chatbox {display:flex;flex-direction:column}
    .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
    .user_msg {background-color:cornflowerblue;color:white;align-self:start}
    .resp_msg {background-color:lightgray;align-self:self-end}
    """,
    allow_screenshot=False,
    allow_flagging=False,
)

iface.launch()