Anupam committed on
Commit
2c2a89b
1 Parent(s): 0765808

Upload demo_03.py

Browse files
Files changed (1) hide show
  1. demo_03.py +60 -0
demo_03.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset
2
+
3
+ dataset = load_dataset("yelp_review_full")
4
+ dataset["train"][100]
5
+
6
+ #creating the dataset
7
+ from transformers import AutoTokenizer
8
+
9
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
10
+
11
+
def tokenize_function(examples):
    """Tokenize the "text" field of a batch, padded and truncated to the model's max length."""
    texts = examples["text"]
    return tokenizer(texts, padding="max_length", truncation=True)
# Mapping the tokenizer over the whole dataset in batches:
tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Small subsets for quick experimentation:
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

# Loading pretrained model (num_labels=5 matches the Yelp 1-5 star ratings):
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)

from transformers import TrainingArguments
# NOTE(review): the original also built
#   training_args = TrainingArguments(output_dir="test_trainer")
# here, but that object was unconditionally overwritten below before any
# use — dead code removed. The import is kept intact.
# Evaluate:
def compute_metrics(eval_pred):
    """Compute classification accuracy from a (logits, labels) pair.

    Parameters:
        eval_pred: a 2-tuple ``(logits, labels)`` as passed by ``Trainer``
            (an ``EvalPrediction`` unpacks the same way).

    Returns:
        dict with a single ``"accuracy"`` key (fraction of correct predictions).

    Fixes two NameErrors in the original: ``np`` was never imported and
    ``metric`` was never defined anywhere in the file. Accuracy is now
    computed directly with numpy instead of the undefined ``metric`` object.
    """
    import numpy as np  # local import: the file has no top-level numpy import

    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    accuracy = float(np.mean(np.asarray(predictions) == np.asarray(labels)))
    return {"accuracy": accuracy}
# Training arguments and Trainer:
from transformers import TrainingArguments, Trainer

# Evaluate once per epoch on the eval split.
training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")

# Wire the model, data subsets and metric function into the Trainer:
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
)

# Execute the fine-tuning run:
trainer.train()

# Predictions on the eval subset — print the shapes, then the full output:
predictions = trainer.predict(small_eval_dataset)
print(predictions.predictions.shape, predictions.label_ids.shape)
print(predictions)