VictoriaLinML committed
Commit a1060a0
1 Parent(s): 537f1d8

Update README.md

Files changed (1)
  1. README.md +93 -1
README.md CHANGED
@@ -47,4 +47,96 @@ The training data statistics of XGLM-1.7B are shown in the table below.
 
 ## Model card
 
-For intended usage of the model, please refer to the [model card](https://github.com/pytorch/fairseq/blob/main/examples/xglm/model_card.md) released by the XGLM-1.7B development team.
+For intended usage of the model, please refer to the [model card](https://github.com/pytorch/fairseq/blob/main/examples/xglm/model_card.md) released by the XGLM-1.7B development team.
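+
+As a quick illustration of basic usage, here is a minimal sketch of loading the checkpoint and sampling a continuation with the Hugging Face `generate` API (the prompt and decoding settings are arbitrary choices for illustration):
+
+```python
+from transformers import XGLMTokenizer, XGLMForCausalLM
+
+tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-1.7B")
+model = XGLMForCausalLM.from_pretrained("facebook/xglm-1.7B")
+
+# Encode a prompt and sample a short continuation with nucleus sampling.
+inputs = tokenizer("The capital city of France is", return_tensors="pt")
+output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=True, top_p=0.9)
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+```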
+
+## Example (COPA)
+The following snippet shows how to evaluate our models (GPT-3 style, zero-shot) on the Choice of Plausible Alternatives (COPA) task, using examples in English, Chinese, and Haitian Creole. Each alternative is scored by the sum of its token log-probabilities under the model, and the higher-scoring alternative is taken as the prediction.
+
+```python
+import torch
+import torch.nn.functional as F
+
+from transformers import XGLMTokenizer, XGLMForCausalLM
+
+tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-1.7B")
+model = XGLMForCausalLM.from_pretrained("facebook/xglm-1.7B")
+
+data_samples = {
+    'en': [
+        {
+            "premise": "I wanted to conserve energy.",
+            "choice1": "I swept the floor in the unoccupied room.",
+            "choice2": "I shut off the light in the unoccupied room.",
+            "question": "effect",
+            "label": "1"
+        },
+        {
+            "premise": "The flame on the candle went out.",
+            "choice1": "I blew on the wick.",
+            "choice2": "I put a match to the wick.",
+            "question": "cause",
+            "label": "0"
+        }
+    ],
+    'zh': [
+        {
+            "premise": "我想节约能源。",
+            "choice1": "我在空着的房间里扫了地板。",
+            "choice2": "我把空房间里的灯关了。",
+            "question": "effect",
+            "label": "1"
+        },
+        {
+            "premise": "蜡烛上的火焰熄灭了。",
+            "choice1": "我吹灭了灯芯。",
+            "choice2": "我把一根火柴放在灯芯上。",
+            "question": "cause",
+            "label": "0"
+        }
+    ],
+    'ht': [  # Haitian Creole
+        {
+            "premise": "M te vle konsève enèji.",
+            "choice1": "Mwen te fin baleye chanm lib la.",
+            "choice2": "Mwen te femen limyè nan chanm lib la.",
+            "question": "effect",
+            "label": "1"
+        },
+        {
+            "premise": "Flam bouji a te etenn.",
+            "choice1": "Mwen te soufle bouji a.",
+            "choice2": "Mwen te limen mèch bouji a.",
+            "question": "cause",
+            "label": "0"
+        }
+    ]
+}
+
+# Score a string by the per-token log-probabilities the model assigns to it.
+def get_logprobs(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    # The model predicts the next token, so the targets are the inputs shifted by one.
+    input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:]
+    outputs = model(**inputs, labels=input_ids)
+    logits = outputs.logits
+    # Gather the log-probability of each target token from the softmax over the vocabulary.
+    logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
+    return logprobs
+
+# Zero-shot evaluation for the Choice of Plausible Alternatives (COPA) task.
+# A return value of 0 indicates that the first alternative is more plausible,
+# while 1 indicates that the second alternative is more plausible.
+def COPA_eval(prompt, alternative1, alternative2):
+    lprob1 = get_logprobs(prompt + "\n" + alternative1).sum()
+    lprob2 = get_logprobs(prompt + "\n" + alternative2).sum()
+    return 0 if lprob1 > lprob2 else 1
+
+for lang in data_samples:
+    for idx, example in enumerate(data_samples[lang]):
+        predict = COPA_eval(example["premise"], example["choice1"], example["choice2"])
+        print(f'{lang}-{idx}', predict, example['label'])
+
+# en-0 1 1
+# en-1 0 0
+# zh-0 1 1
+# zh-1 0 0
+# ht-0 1 1
+# ht-1 0 0
+```
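+
+Note that `COPA_eval` above ignores the `question` field ("cause"/"effect") and simply concatenates premise and choice. A common alternative formulation joins the two with an explicit causal connector; the sketch below illustrates that idea for the English samples only, since the connectors are language-specific. `COPA_eval_with_connector` is a hypothetical helper, not part of the released evaluation code:
+
+```python
+# Hypothetical variant (illustration only): use the "question" field to join
+# premise and choice with an English causal connector before scoring.
+def COPA_eval_with_connector(premise, choice1, choice2, question):
+    connector = "because" if question == "cause" else "so"
+    def join(choice):
+        # Drop the premise's final period and lower-case the choice so the
+        # joined text reads as a single sentence.
+        return premise.rstrip(".") + " " + connector + " " + choice[0].lower() + choice[1:]
+    lprob1 = get_logprobs(join(choice1)).sum()
+    lprob2 = get_logprobs(join(choice2)).sum()
+    return 0 if lprob1 > lprob2 else 1
+
+for idx, example in enumerate(data_samples['en']):
+    predict = COPA_eval_with_connector(
+        example["premise"], example["choice1"], example["choice2"], example["question"]
+    )
+    print(f'en-{idx}', predict, example['label'])
+```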