jjuarez committed
Commit
912fb22
1 Parent(s): 6890e7b

Upload untitled0.py

Files changed (1)
  1. untitled0.py +126 -0
untitled0.py ADDED
@@ -0,0 +1,126 @@
+ # -*- coding: utf-8 -*-
+ """Untitled0.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1aMkctyYgdHD61sv7-bJHFN1B5taCv6c2
+ """
+
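+ # Fine-tunes t5-small for abstractive summarization on the PubMed subset of
+ # scientific_papers, then serves the model through a Gradio interface.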
+ import gradio as gr
+ from datasets import load_dataset
+ import evaluate
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
+ import numpy as np
+ import nltk
+
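+ # "punkt" supplies the sentence tokenizer used to format text for ROUGE scoring.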
+ nltk.download("punkt")
+ raw_dataset = load_dataset("scientific_papers", "pubmed")
+ metric = evaluate.load("rouge")
+ model_checkpoint = "t5-small"
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+
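+ # T5 checkpoints are multi-task models and expect a task prefix on the input.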
+ if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
+     prefix = "summarize: "
+ else:
+     prefix = ""
+
+ # preprocessing function
+ max_input_length = 512
+ max_target_length = 128
+ def preprocess_function(examples):
+     inputs = [prefix + doc for doc in examples["article"]]
+     model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
+
+     # Tokenize the targets; text_target= replaces the deprecated
+     # tokenizer.as_target_tokenizer() context manager.
+     labels = tokenizer(text_target=examples["abstract"], max_length=max_target_length, truncation=True)
+
+     model_inputs["labels"] = labels["input_ids"]
+     return model_inputs
+
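+ # Subsample 1,000 random examples per split to keep the run tractable.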
+ for split in ["train", "validation", "test"]:
+     # randint's upper bound is exclusive, so use the full split length.
+     raw_dataset[split] = raw_dataset[split].select(np.random.randint(0, len(raw_dataset[split]), 1_000))
+ tokenized_dataset = raw_dataset.map(preprocess_function, batched=True)
+
+ model_name = "fine-tuned-t5-small"
+ # Load the base checkpoint; model_name only names the output directory below.
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
+
+ batch_size = 8
+
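+ # Effective train batch size is batch_size * gradient_accumulation_steps = 16.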
+ args = Seq2SeqTrainingArguments(
+     f"{model_name}-scientific_papers",
+     evaluation_strategy="epoch",
+     learning_rate=2e-5,
+     per_device_train_batch_size=batch_size,
+     per_device_eval_batch_size=batch_size,
+     weight_decay=0.01,
+     save_total_limit=3,
+     num_train_epochs=1,
+     predict_with_generate=True,
+     # fp16=True,
+     push_to_hub=False,
+     gradient_accumulation_steps=2
+ )
+
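+ # Dynamically pads inputs and labels to the longest sequence in each batch.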
+ data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
+
+ # computing metrics from the predictions
+ def compute_metrics(eval_pred):
+     predictions, labels = eval_pred
+     decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
+     # Replace -100 in the labels as we can't decode them.
+     labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
+     decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
+     # Rouge expects a newline after each sentence
+     decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
+     decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
+     result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
+     # Extract a few results
+     result = {key: value * 100 for key, value in result.items()}
+     # Add mean generated length
+     prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions]
+     result["gen_len"] = np.mean(prediction_lens)
+     return {k: round(v, 4) for k, v in result.items()}
+
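+ # predict_with_generate=True makes the Trainer call generate() during evaluation,
+ # so compute_metrics receives generated token ids rather than logits.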
+ trainer = Seq2SeqTrainer(
+     model,
+     args,
+     train_dataset=tokenized_dataset["train"],
+     eval_dataset=tokenized_dataset["validation"],
+     data_collator=data_collator,
+     tokenizer=tokenizer,
+     compute_metrics=compute_metrics
+ )
+ trainer.train()
+
+ # Define the input and output interface of the app
+ # (gradio is already imported as gr at the top of the script).
+
+ def summarizer(input_text):
+     inputs = [prefix + input_text]
+     # Move the tokenized batch to the model's device (the Trainer may have
+     # placed the model on GPU).
+     model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True, return_tensors="pt").to(model.device)
+     summary_ids = model.generate(
+         input_ids=model_inputs["input_ids"],
+         attention_mask=model_inputs["attention_mask"],
+         num_beams=4,
+         length_penalty=2.0,
+         max_length=max_target_length + 2,  # +2 from original because we start at step=1 and stop before max_length
+         repetition_penalty=2.0,
+         early_stopping=True,
+         use_cache=True
+     )
+     summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+     return summary
+
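+ # Beam search with length and repetition penalties trades decoding speed for
+ # less repetitive, more complete summaries.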
+ # Interface creation and launching
+ iface = gr.Interface(
+     fn=summarizer,
+     inputs=gr.Textbox(label="Input Text"),   # gr.inputs.Textbox is deprecated
+     outputs=gr.Textbox(label="Summary"),     # gr.outputs.Textbox is deprecated
+     title="Scientific Paper Summarizer",
+     description="Summarizes scientific papers using a fine-tuned T5 model",
+     article="https://huggingface.co/blog/scientific-summarization-with-t5-fine-tuning",
+     theme="gray"
+ )
+ iface.launch()