import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_name):
    # Load a causal LM and its matching tokenizer from the Hugging Face Hub.
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer
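
# Note: converse() below reloads both models on every call, which is slow for
# repeated use. If memory allows, a cached loader is one option; a minimal
# sketch (load_model_cached is a hypothetical helper, not part of this app):
#
# from functools import lru_cache
#
# @lru_cache(maxsize=4)
# def load_model_cached(model_name):
#     return (AutoModelForCausalLM.from_pretrained(model_name),
#             AutoTokenizer.from_pretrained(model_name))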

def converse(prompt, model1_name, model2_name, temperature1, temperature2):
    model1, tokenizer1 = load_model(model1_name)
    model2, tokenizer2 = load_model(model2_name)
    # Model 1 responds to the user's prompt.
    inputs = tokenizer1(prompt, return_tensors="pt")
    outputs = model1.generate(inputs.input_ids, max_new_tokens=50, temperature=temperature1, do_sample=True, pad_token_id=tokenizer1.eos_token_id)
    response1 = tokenizer1.decode(outputs[0], skip_special_tokens=True)
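    # Note: for causal LMs, generate() returns the prompt tokens followed by
    # the new tokens, so response1 still contains the original prompt as a
    # prefix, and Model 2 sees both the prompt and Model 1's reply. To pass
    # only the continuation, one could slice off the input tokens, e.g.:
    #     tokenizer1.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)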
    # Model 2 responds to Model 1's output.
    inputs = tokenizer2(response1, return_tensors="pt")
    outputs = model2.generate(inputs.input_ids, max_new_tokens=50, temperature=temperature2, do_sample=True, pad_token_id=tokenizer2.eos_token_id)
    response2 = tokenizer2.decode(outputs[0], skip_special_tokens=True)
    return response1, response2

iface = gr.Interface(
    fn=converse,
    inputs=[
        gr.Textbox(label="Input Prompt"),
        gr.Dropdown(["gpt2", "distilgpt2", "EleutherAI/gpt-neo-2.7B"], label="Model 1"),
        gr.Dropdown(["gpt2", "distilgpt2", "EleutherAI/gpt-neo-2.7B"], label="Model 2"),
        gr.Slider(0.1, 1.0, step=0.1, value=0.7, label="Temperature for Model 1"),
        gr.Slider(0.1, 1.0, step=0.1, value=0.7, label="Temperature for Model 2"),
    ],
    outputs=["text", "text"],
    title="Multi-Model Conversation",
    description="Input a prompt to start a conversation between two models. Adjust temperatures for more diverse outputs.",
)
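
# Launch the interface when the script is run directly. Passing share=True to
# launch() would additionally create a temporary public URL via Gradio.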
if __name__ == "__main__":
    iface.launch()