Anupam committed on
Commit
2d8f19a
1 Parent(s): 4260a3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -1
app.py CHANGED
@@ -1,7 +1,70 @@
1
  import gradio as gr
2
 
3
  def greet(name):
4
- return "Hello " + name + "!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
 
 
1
  import gradio as gr
2
 
3
def greet(name):
    """Fine-tune BERT on a 1,000-sample Yelp subset and return test predictions.

    NOTE(review): this runs a full download + training pass on EVERY call of the
    Gradio handler — presumably a tutorial exercise; consider moving the training
    to module scope or a cached startup step.

    Args:
        name: Text from the Gradio input box. Currently unused by the training
            pipeline; kept so the ``gr.Interface(fn=greet, ...)`` wiring still works.

    Returns:
        The ``PredictionOutput`` from ``Trainer.predict`` on the 1k eval subset.
    """
    # Local imports keep the heavy third-party deps out of module import time.
    # `np` was referenced below but never imported in the original (NameError).
    import numpy as np
    from datasets import load_dataset
    from transformers import (
        AutoModelForSequenceClassification,
        AutoTokenizer,
        Trainer,
        TrainingArguments,
    )

    # Yelp reviews with 5 star-rating classes (matches num_labels=5 below).
    dataset = load_dataset("yelp_review_full")

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

    def tokenize_function(examples):
        # Pad/truncate every review to the model's max sequence length.
        return tokenizer(examples["text"], padding="max_length", truncation=True)

    tokenized_datasets = dataset.map(tokenize_function, batched=True)

    # Small, seeded 1k-sample splits so the demo trains in reasonable time
    # and is reproducible.
    small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
    small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

    model = AutoModelForSequenceClassification.from_pretrained(
        "bert-base-cased", num_labels=5
    )

    def compute_metrics(eval_pred):
        # `metric` was used but never defined in the original — compute plain
        # accuracy with numpy instead (same result as evaluate's "accuracy").
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        accuracy = float((predictions == np.asarray(labels)).mean())
        return {"accuracy": accuracy}

    # Single TrainingArguments (the original built it twice and discarded the
    # first); evaluate at the end of each epoch.
    training_args = TrainingArguments(
        output_dir="test_trainer", evaluation_strategy="epoch"
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=small_train_dataset,
        eval_dataset=small_eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Execute the training, then predict on the held-out 1k eval subset.
    trainer.train()
    predictions = trainer.predict(small_eval_dataset)
    print(predictions.predictions.shape, predictions.label_ids.shape)
    return predictions
64
+
65
+
66
+
67
+
68
 
69
  # Gradio UI: one text input, one text output, handled by `greet`.
  # NOTE(review): `greet` runs a full training pass per invocation — verify
  # that is intended before deploying.
  demo = gr.Interface(fn=greet, inputs="text", outputs="text")
70