import tempfile

import torch
import pandas as pd
from transformers import BartTokenizer, BartForConditionalGeneration
import gradio as gr
# Initialize models and tokenizers for the Healthcare and AI perspectives
healthcare_model_name = 'facebook/bart-large-cnn'  # CNN/DailyMail summarizer, used for the healthcare perspective
ai_model_name = 'facebook/bart-large-xsum'  # XSum summarizer, used for the AI perspective
healthcare_tokenizer = BartTokenizer.from_pretrained(healthcare_model_name)
ai_tokenizer = BartTokenizer.from_pretrained(ai_model_name)
healthcare_model = BartForConditionalGeneration.from_pretrained(healthcare_model_name)
ai_model = BartForConditionalGeneration.from_pretrained(ai_model_name)
# Summarization helper shared by the Healthcare and AI agents
def generate_summary(text, tokenizer, model):
    inputs = tokenizer(text, return_tensors="pt", max_length=1024, truncation=True, padding="max_length")
    with torch.no_grad():
        outputs = model.generate(inputs["input_ids"], max_length=150, num_beams=5, no_repeat_ngram_size=2, early_stopping=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
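
# Illustrative call (the abstract text here is an assumed placeholder, not from a real paper):
#   generate_summary("We evaluate a transformer-based triage model on two hospital datasets...",
#                    healthcare_tokenizer, healthcare_model)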
def healthcare_agent(abstract):
    return generate_summary(abstract, healthcare_tokenizer, healthcare_model)

def ai_agent(abstract):
    return generate_summary(abstract, ai_tokenizer, ai_model)
# Interaction function to generate implications based on both agents' insights
def generate_implications(healthcare_summary, ai_summary):
    healthcare_implication = f"Healthcare Implications: {healthcare_summary} The healthcare sector can leverage these findings to improve patient care and treatment outcomes."
    ai_implication = f"AI Implications: {ai_summary} These insights can further enhance AI models, making them more applicable in real-world healthcare scenarios."
    # Combine both implications to provide a holistic view
    combined_implications = f"{healthcare_implication}\n\n{ai_implication}"
    return combined_implications
# Function to process the uploaded CSV and generate results
def process_and_generate_implications(csv_path):
    # Read the input CSV file containing titles and abstracts
    papers_df = pd.read_csv(csv_path, encoding='latin-1')
    # Normalize column names so 'Title'/'Abstract' variants are accepted
    papers_df.columns = papers_df.columns.str.lower()
    # Check that the required 'title' and 'abstract' columns exist
    required_columns = ['title', 'abstract']
    if not all(col in papers_df.columns for col in required_columns):
        raise gr.Error("The CSV must contain 'title' and 'abstract' columns.")
    # Drop rows where title or abstract is missing
    papers_df = papers_df.dropna(subset=['title', 'abstract'])
    results = []
    # Process each paper (row) in the CSV
    for _, row in papers_df.iterrows():
        title = row['title']
        abstract = str(row['abstract'])
        # Generate summaries using both agents
        healthcare_summary = healthcare_agent(abstract)
        ai_summary = ai_agent(abstract)
        # Generate the implications based on both summaries
        implications = generate_implications(healthcare_summary, ai_summary)
        # Store the results
        results.append({
            "Title": title,
            "Abstract": abstract,
            "Healthcare Summary": healthcare_summary,
            "AI Summary": ai_summary,
            "Implications": implications
        })
    # Convert results into a DataFrame
    results_df = pd.DataFrame(results)
    # Write the results to a temporary CSV file so the gr.File output can offer it for download
    with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode="w", newline="") as tmp:
        results_df.to_csv(tmp, index=False)
        output_path = tmp.name
    return output_path
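
# Expected input format (illustrative example only, not a bundled file):
#   title,abstract
#   "Deep Learning for Sepsis Prediction","We train a recurrent model on ICU vitals to ..."
#   "Federated Learning in Radiology","We study privacy-preserving training across hospitals ..."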
# Define Gradio interface
def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("## Research Paper Summarization and Implications")
        gr.Markdown("Upload a CSV file with 'title' and 'abstract' columns to generate healthcare and AI implications.")
        # CSV upload; type="filepath" passes the uploaded file's path straight to the handler
        csv_input = gr.File(label="Upload CSV File", type="filepath")
        # Downloadable CSV containing the generated summaries and implications
        output_csv = gr.File(label="Download Results CSV")
        # Process the CSV and generate implications as soon as a file is uploaded
        csv_input.change(process_and_generate_implications, inputs=csv_input, outputs=output_csv)
    return demo
# Launch the interface
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(debug=True)  # debug=True prints detailed logs to the console
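
# Assumed dependencies: torch, transformers, pandas, and a recent Gradio release
# (4.x) where gr.File accepts type="filepath" and gr.Error is available.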