Rachneet committed on
Commit
a1a7158
1 Parent(s): b25ffd1

chore: add README

Browse files
Files changed (1) hide show
  1. README.md +43 -0
README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# load model

```python
# Load the fine-tuned T5 model and run beam-search generation on one
# figurative-language NLI example (hypothesis + premise -> label/explanation).
import torch  # required below for torch.device / torch.no_grad
from transformers import (
    AutoTokenizer,
    AutoConfig,
    AutoModelForSeq2SeqLM,
)

model_path = "T5-large-esnli-impli-figurative"

tokenizer = AutoTokenizer.from_pretrained(model_path)
config = AutoConfig.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

premise = "I just caught a guy picking up used chewing gum and he put it in his mouth."
hypothesis = "it was such a pleasant sight to see a guy picking up used chewing gum; and he put it in his mouth"
# The model was trained on this task-prefixed input format.
prepared_input = f"figurative hypothesis: {hypothesis} premise: {premise}"
features = tokenizer(prepared_input, max_length=128, padding="max_length", truncation=True, return_tensors="pt")
# Inputs must live on the same device as the model, otherwise generate() fails on GPU.
features = features.to(device)

model.eval()
model.to(device)
with torch.no_grad():
    # https://huggingface.co/blog/how-to-generate
    generated_ids = model.generate(
        **features,
        max_length=128,
        use_cache=True,
        num_beams=4,
        length_penalty=0.6,
        early_stopping=True,
    )
# Decode the ids produced above (was: undefined name `outputs`).
dec_preds = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
print("The prediction is: ", dec_preds)
# Strip the leading character and the "explanation:" marker to isolate the explanation text.
print(dec_preds[1:].replace("explanation:", "").lstrip())
```

# Example input

figurative hypothesis: I was gone for only a few days and my considerate adult son just let the sink fill up with dirty dishes, making me feel really happy premise: I left my adult son home for a few days and just came back to a sink full of gross old dishes.