Seongsu Park committed on
Commit c776352 • 1 Parent(s): aad26aa

a naive model

Files changed (2):
  1. conto.py +91 -0
  2. requirements.txt +3 -0
conto.py ADDED
@@ -0,0 +1,91 @@
+from datasets import load_dataset, DatasetDict
+from evaluate import load
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+import torch
+import numpy as np
+
+labels = {  # NLI label ids: contradiction / neutral / entailment
+    'contradiction': 0,
+    'neutral': 1,
+    'entailment': 2,
+}
+
+datasets = load_dataset('seongs1024/DKK-nli')
+datasets = DatasetDict({  # keep only 1/100 of each split for a quick, naive run
+    'train': datasets['train'].shard(num_shards=100, index=0),
+    'validation': datasets['validation'].shard(num_shards=100, index=0),
+})
+
+metric = load('glue', 'mnli')  # accuracy, as defined for MNLI in the GLUE suite
+def compute_metrics(eval_pred):
+    predictions, labels = eval_pred
+    predictions = np.argmax(predictions, axis=1)
+    return metric.compute(predictions=predictions, references=labels)
+
+check_point = 'klue/roberta-small'  # Korean RoBERTa encoder from the KLUE project
+model = AutoModelForSequenceClassification.from_pretrained(check_point, num_labels=3)
+tokenizer = AutoTokenizer.from_pretrained(check_point)
+
+def preprocess_function(examples):
+    return tokenizer(
+        examples['premise'],
+        examples['hypothesis'],
+        truncation=True,
+        return_token_type_ids=False,
+    )
+encoded_datasets = datasets.map(preprocess_function, batched=True)
+
+batch_size = 8
+args = TrainingArguments(
+    "test-nli",
+    evaluation_strategy="epoch",
+    save_strategy='epoch',
+    learning_rate=2e-5,
+    per_device_train_batch_size=batch_size,
+    per_device_eval_batch_size=batch_size,
+    num_train_epochs=5,
+    weight_decay=0.01,
+    load_best_model_at_end=True,
+    metric_for_best_model='accuracy',
+)
+
+trainer = Trainer(
+    model,
+    args,
+    train_dataset=encoded_datasets["train"],
+    eval_dataset=encoded_datasets["validation"],
+    tokenizer=tokenizer,
+    compute_metrics=compute_metrics,
+)
+
+trainer.train()
+trainer.evaluate()
+
+trainer.save_model('./model')  # also saves the tokenizer, since one was passed to Trainer
+
+# features = tokenizer(
+#     [
+#         'A man is eating pizza',
+#         'A black race car starts up in front of a crowd of people.',
+#         '오늘 맛있는 밥을 먹었어 근데 이게 어떻게 맛있다 설명하기는 좀 애매해.',  # "I ate tasty food today, but it's a bit hard to explain how."
+#         '나 집에 가는 중.',  # "I'm on my way home."
+#     ],
+#     [
+#         'A man eats something',
+#         'A man is driving down a lonely road.',
+#         '오늘 맛없는 밥을 먹었어 근데 이게 어떻게 맛있다 설명하기는 좀 애매해.',  # "I ate bad food today, but it's a bit hard to explain how it would be tasty."
+#         '나 집에 도착했어.',  # "I've arrived home."
+#     ],
+#     padding=True,
+#     truncation=True,
+#     return_tensors="pt"
+# )
+# print(features)
+
+# model.eval()
+# with torch.no_grad():
+#     scores = model(**features).logits
+#     print(scores)
+#     label_mapping = ['contradiction', 'neutral', 'entailment']  # kept in sync with the labels dict above
+#     labels = [label_mapping[score_max] for score_max in scores.argmax(dim=1)]
+#     print(labels)
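
Note: the commented-out block above sketches batch inference but is left disabled in this commit. As a minimal usage sketch, not part of the commit, the fine-tuned checkpoint can be reloaded from the './model' directory written by trainer.save_model and queried on a single pair; the id2label order below is an assumption taken from the labels dict at the top of the script.

# Hypothetical inference sketch (assumes './model' exists and the label ids
# follow the labels dict above: 0=contradiction, 1=neutral, 2=entailment).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained('./model')
model = AutoModelForSequenceClassification.from_pretrained('./model')
model.eval()

features = tokenizer(
    'A man is eating pizza',      # premise
    'A man eats something',       # hypothesis
    truncation=True,
    return_token_type_ids=False,  # mirror the training-time preprocessing
    return_tensors='pt',
)
with torch.no_grad():
    logits = model(**features).logits

id2label = ['contradiction', 'neutral', 'entailment']  # assumed order, per the labels dict
print(id2label[logits.argmax(dim=1).item()])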
requirements.txt CHANGED
@@ -1,3 +1,6 @@
 datasets==2.10.0
+evaluate==0.4.1
+scipy==1.7.3
+scikit-learn==1.0.2
 torch==1.9.0
 transformers==4.26.0
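
The two pins beyond evaluate are presumably needed because the evaluate library's 'glue' metric module imports scipy and scikit-learn when it loads, even though the MNLI variant only computes accuracy. A quick sanity check under that assumption:

# Hypothetical check, not part of the commit: loading glue/mnli succeeds
# only once scipy and scikit-learn are installed alongside evaluate.
from evaluate import load

metric = load('glue', 'mnli')
print(metric.compute(predictions=[0, 1, 2], references=[0, 1, 1]))  # {'accuracy': 0.666...}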