import gradio as gr
from transformers import BloomTokenizerFast, pipeline

# Load the tokenizer and the fine-tuned model once at startup instead of on
# every button click. The bigscience/bloom tokenizer is shared across BLOOM
# model sizes, so it also works for the 560m fine-tune.
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")
generator = pipeline(
    "text-generation",
    model="simonosgoode/bloom-560m-finetuned-cdn_law",
    tokenizer=tokenizer,
)


def generate(input_prompt):
    """Generate the next paragraph of a judgement from the input prompt."""
    generated_judgement = generator(
        input_prompt,
        max_length=100,
        num_return_sequences=1,
        return_full_text=True,
        # num_beams=1,
        # early_stopping=True,
        temperature=0.7,         # Only takes effect when do_sample=True
        # top_k=50,              # Default 50
        top_p=1,                 # Default 1.0
        no_repeat_ngram_size=3,  # Default 0
        repetition_penalty=1.0,  # Default 1.0
        # do_sample=True,        # Default False
    )[0]["generated_text"]
    return generated_judgement


with gr.Blocks() as judgements:
    inputs = gr.Textbox(lines=10, label="Input paragraph")
    output = gr.Textbox(lines=10, label="Output paragraph")
    btn = gr.Button("Generate the next paragraph of a judgement")
    btn.click(fn=generate, inputs=inputs, outputs=output)

judgements.launch()