"""Gradio text-generation demo for the databricks/dolly-v2-3b instruction model."""

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

from instruct_pipeline import InstructionTextGenerationPipeline

# Left padding is required for decoder-only models so generated tokens
# follow the prompt correctly in batched generation.
tokenizer = AutoTokenizer.from_pretrained(
    "databricks/dolly-v2-3b", padding_side="left"
)
# bfloat16 halves memory use versus float32 while keeping float32's
# exponent range, which generation quality tolerates well.
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-3b", torch_dtype=torch.bfloat16
)
generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer)


def textGen(text):
    """Generate a model response for *text*.

    Args:
        text: The instruction/prompt string entered by the user.

    Returns:
        The pipeline's generated output for the prompt.
    """
    # Empty prompts produce meaningless generations; return early instead
    # of burning compute on them.
    if not text:
        return ""
    return generate_text(text)


demo = gr.Interface(fn=textGen, inputs="text", outputs="text")

# Guard the blocking server launch so importing this module (e.g. for
# reuse of `generate_text`) does not start the web UI.
if __name__ == "__main__":
    demo.launch()