from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load the tokenizer and model weights from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("KoboldAI/OPT-13B-Erebus")
model = AutoModelForCausalLM.from_pretrained("KoboldAI/OPT-13B-Erebus")

def chat(prompt):
    # Tokenize the prompt into input IDs for the model.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    # Generate a continuation, capped at 128 tokens total (prompt included).
    generated_ids = model.generate(input_ids, max_length=128)
    # Decode the generated token IDs back into plain text.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)

# Expose the chat function as a simple text-in/text-out web UI.
gr.Interface(fn=chat, inputs="text", outputs="text").launch(debug=True)
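
# Note: OPT-13B-Erebus has roughly 13 billion parameters, so the default
# full-precision load above needs a very large amount of RAM. One possible
# variant (an assumption, not part of the original script) is to load the
# weights in float16 and let the `accelerate` package place them on the
# available GPU(s), which requires a CUDA-capable machine with `accelerate`
# installed:
#
#     import torch
#     model = AutoModelForCausalLM.from_pretrained(
#         "KoboldAI/OPT-13B-Erebus",
#         torch_dtype=torch.float16,
#         device_map="auto",
#     )
#
# With device_map="auto", the inputs should also be moved to the model's
# device inside chat(), e.g. input_ids = input_ids.to(model.device).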