mrm8488 committed on
Commit
1875768
1 Parent(s): 526964f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +24 -4
README.md CHANGED
@@ -16,13 +16,33 @@ This model is a fine-tuned version of [bigscience/bloom-560m](https://huggingfac
16
  ## Usage
17
 
18
  ```py
19
- from transformers import pipeline
 
20
 
21
- model_ckpt = "mrm8488/electricidad-base-finetuned-sst2-es"
22
 
23
- classifier = pipeline("sentiment-analysis", model=model_ckpt)
24
 
25
- classifier("Here your text in Spanish!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  ```
27
 
28
  ### Evaluation results
 
16
  ## Usage
17
 
18
  ```py
19
+ from datasets import load_dataset
20
+ from transformers import BloomTokenizerFast, BloomForCausalLM
21
 
22
+ valid_dataset = load_dataset('totto', split='validation')
23
 
24
+ from preprocess import preprocess # This file is included in the repo
25
 
26
+ # Now we linearize the tables
27
+ valid_dataset = valid_dataset.map(preprocess)
28
+
29
+ model_ckpt = "mrm8488/bloom-560m-finetuned-totto-table-to-text"
30
+
31
+ tokenizer = BloomTokenizerFast.from_pretrained(model_ckpt)
32
+ model = BloomForCausalLM.from_pretrained(model_ckpt).to("cuda")
33
+
34
+
35
+ def explain_hl_cells(text):
36
+ inputs = tokenizer(text, return_tensors='pt')
37
+ input_ids = inputs.input_ids.to("cuda")
38
+ attention_mask = inputs.attention_mask.to("cuda")
39
+ output = model.generate(input_ids, attention_mask=attention_mask, max_length=2048, eos_token_id=tokenizer.eos_token_id) # num_beams=3, temperature=1.9
40
+
41
+ return tokenizer.decode(output[0], skip_special_tokens=False)
42
+
43
+ example = valid_dataset[1]
44
+
45
+ print(explain_hl_cells(example['linearized_table']))
46
  ```
47
 
48
  ### Evaluation results