# HuggingFace Spaces app (commit 28dd5be, 1,637 bytes).
# NOTE(review): the Space was showing "Runtime error" when this file was captured.
import gradio as gr
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
# Assuming you have loaded your model and tokenizer
# Replace this with your actual model and tokenizer
# Define the model function for Gradio
def generate_summary(input_text):
    """Summarize *input_text* with a fine-tuned medical summarization model.

    Args:
        input_text: Free-form text (e.g. a patient's question) to summarize.

    Returns:
        The generated summary as a plain string.
    """
    # Load the model/tokenizer only once and cache the pipeline on the
    # function object — the original re-downloaded and rebuilt everything
    # on every request, which made each call extremely slow.
    if not hasattr(generate_summary, "_pipeline"):
        tokenizer = AutoTokenizer.from_pretrained(
            "Shariar00/medical_summarization_finetune_medical_qa"
        )
        model = AutoModelForSeq2SeqLM.from_pretrained(
            "Shariar00/medical_summarization_finetune_medical_qa"
        )
        generate_summary._pipeline = pipeline(
            "summarization", model=model, tokenizer=tokenizer
        )
    # The summarization pipeline returns a list of dicts like
    # [{"summary_text": "..."}]; extract the string so Gradio displays
    # the summary rather than the dict's repr (original bug: returned
    # output[0] directly).
    output = generate_summary._pipeline(
        input_text, max_length=512, num_return_sequences=1
    )
    return output[0]["summary_text"]
# Create a Gradio interface
# Wire the summarizer into a minimal text-in / text-out web UI.
iface = gr.Interface(fn=generate_summary, inputs="text", outputs="text")

# Start the Gradio server for the demo.
iface.launch()