import gradio as gr
from transformers import pipeline

# Load the fine-tuned model and its tokenizer from the Hugging Face Hub
path = 'HamadML/bloomz-560m_p'
pipe = pipeline('text-generation', model=path, tokenizer=path)


def generate_prompt(instruction, input=None):
    """Wrap the instruction (and optional input) in an Alpaca-style prompt template."""
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Response:"""


def generate_poetry(prompt, top_p, top_k, temperature, max_length):
    # Add the instruction for the model
    instruction = "Generate poetry based on the given prompt."
    model_input = generate_prompt(instruction, prompt)

    # Generate poetry with sampling; cast slider values that must be integers
    output = pipe(
        model_input,
        max_length=int(max_length),
        do_sample=True,
        top_k=int(top_k),
        top_p=top_p,
        temperature=temperature,
    )
    return output[0]['generated_text']


# Create the Gradio interface
inputs = [
    gr.Textbox(label="Enter a prompt", placeholder="Enter a prompt", lines=3),
    gr.Slider(label="Top-p value", minimum=0.0, maximum=1.0, value=0.9, step=0.1),
    gr.Slider(label="Top-k value", minimum=1, maximum=1000, value=400, step=1),
    gr.Slider(label="Temperature value", minimum=0.0, maximum=1.0, value=0.9, step=0.1),
    gr.Slider(label="Max length", minimum=1, maximum=300, value=200, step=1),
]
outputs = gr.Textbox(label="Generated Poetry")

examples = [
    ["چرته چې هم د مينې سپکه وشي", 0.9, 400, 0.7, 200],
]

iface = gr.Interface(
    fn=generate_poetry,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    title="Pashto Poetry Generator",
    description="Unleash the beauty of Pashto poetry with the power of deep learning",
    theme="default",
)

iface.launch()