m3hrdadfi committed
Commit 7128736
1 Parent(s): 7746625

Update README.md

Files changed (1)
  1. README.md +22 -22
README.md CHANGED
@@ -86,31 +86,31 @@ model = FlaxAutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)
 
 prefix = "items: "
 # generation_kwargs = {
-#    "max_length": 1024,
-#    "min_length": 128,
+#    "max_length": 512,
+#    "min_length": 64,
 #    "no_repeat_ngram_size": 3,
-#    "do_sample": True,
-#    "top_k": 60,
-#    "top_p": 0.95
+#    "early_stopping": True,
+#    "num_beams": 5,
+#    "length_penalty": 1.5,
 # }
 generation_kwargs = {
     "max_length": 512,
     "min_length": 64,
     "no_repeat_ngram_size": 3,
-    "early_stopping": True,
-    "num_beams": 5,
-    "length_penalty": 1.5,
+    "do_sample": True,
+    "top_k": 60,
+    "top_p": 0.95
 }
 
+
 special_tokens = tokenizer.all_special_tokens
 tokens_map = {
     "<sep>": "--",
-    "<section>": "\
-"
+    "<section>": "\n"
 }
 def skip_special_tokens(text, special_tokens):
     for token in special_tokens:
-        text = text.replace(token, '')
+        text = text.replace(token, "")
 
     return text
 
@@ -137,7 +137,7 @@ def generation_function(texts):
         max_length=256,
         padding="max_length",
         truncation=True,
-        return_tensors='jax'
+        return_tensors="jax"
     )
 
     input_ids = inputs.input_ids
@@ -163,8 +163,7 @@ items = [
 ]
 generated = generation_function(items)
 for text in generated:
-    sections = text.split("\
-")
+    sections = text.split("\n")
     for section in sections:
         section = section.strip()
         if section.startswith("title:"):
@@ -182,8 +181,7 @@ for text in generated:
         else:
             section_info = [f" - {i+1}: {info.strip().capitalize()}" for i, info in enumerate(section.split("--"))]
         print(f"[{headline}]:")
-        print("\
-".join(section_info))
+        print("\n".join(section_info))
 
     print("-" * 130)
 ```
@@ -227,14 +225,16 @@ Output:
 ```
 
 ## Evaluation
+Since the test set is not available, we will evaluate the model based on a shared test set. This test set consists of 5% of the whole test (*= 5,000 records*),
+and we will generate five recipes for each input(*= 25,000 records*).
+The following table summarizes the scores obtained by the **Chef Transformer** and **RecipeNLG** as our baseline.
 
-The following table summarizes the scores obtained by the **Chef Transformer**. Those marked as (*) are the baseline models.
+| Model | COSIM | WER | ROUGE-2 | BLEU | GLEU | METEOR |
+|:------------------------------------------------------------------------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
+| [RecipeNLG](https://huggingface.co/mbien/recipenlg) | 0.5723 | 1.2125 | 0.1354 | 0.1164 | 0.1503 | 0.2309 |
+| [Chef Transformer](huggingface.co/flax-community/t5-recipe-generation) * | **0.7282** | **0.7613** | **0.2470** | **0.3245** | **0.2624** | **0.4150** |
 
-| Model | WER | COSIM | ROUGE-2 |
-|-----------------|-------|-------|---------|
-| Recipe1M+ * | 0.786 | 0.589 | - |
-| RecipeNLG * | 0.751 | 0.666 | - |
-| ChefTransformer | 0.709 | 0.714 | 0.290 |
+*From the 5 generated recipes corresponding to each NER (food items), only the highest score was taken into account in the WER, COSIM, and ROUGE metrics. At the same time, BLEU, GLEU, Meteor were designed to have many possible references.*
 
 
 ## Copyright
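As a reading aid, here is a minimal sketch of how the two decoding set-ups in the first hunk compare when passed to `model.generate`. The checkpoint name is taken from the link in the new evaluation table; the example ingredient list and the decode step around `output_ids.sequences` are assumptions about the surrounding README code, not part of this commit.

```python
# Sketch only: contrast the removed beam-search settings with the added
# sampling settings when calling model.generate on the Flax checkpoint.
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

MODEL_NAME_OR_PATH = "flax-community/t5-recipe-generation"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
model = FlaxAutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)

prefix = "items: "
inputs = tokenizer(
    prefix + "macaroni, butter, salt, bacon, milk, flour, pepper",  # arbitrary example input
    max_length=256,
    padding="max_length",
    truncation=True,
    return_tensors="jax",
)

beam_kwargs = {  # the settings this commit removes
    "max_length": 512, "min_length": 64, "no_repeat_ngram_size": 3,
    "early_stopping": True, "num_beams": 5, "length_penalty": 1.5,
}
sampling_kwargs = {  # the settings this commit adds
    "max_length": 512, "min_length": 64, "no_repeat_ngram_size": 3,
    "do_sample": True, "top_k": 60, "top_p": 0.95,
}

for name, kwargs in [("beam search", beam_kwargs), ("sampling", sampling_kwargs)]:
    output_ids = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        **kwargs,
    )
    text = tokenizer.batch_decode(output_ids.sequences, skip_special_tokens=True)[0]
    print(f"{name}: {text[:120]}...")
```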
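Similarly, a small self-contained sketch of the post-processing the hunks above touch: stripping special tokens, mapping `<sep>`/`<section>` via `tokens_map`, and splitting each generated recipe into title, ingredients, and directions. The sample string, the placeholder special-token list, and the `postprocess` helper name are made up for illustration; the parsing mirrors the fragments shown in the diff.

```python
# Sketch only: the token mapping and section parsing touched by this commit,
# applied to a made-up generation instead of real model output.
special_tokens = ["<pad>", "</s>", "<unk>"]  # in the README this is tokenizer.all_special_tokens
tokens_map = {"<sep>": "--", "<section>": "\n"}

def skip_special_tokens(text, special_tokens):
    for token in special_tokens:
        text = text.replace(token, "")
    return text

def postprocess(text, special_tokens):
    # Strip special tokens, then map <sep>/<section> to "--" and newlines.
    text = skip_special_tokens(text, special_tokens)
    for token, replacement in tokens_map.items():
        text = text.replace(token, replacement)
    return text

sample = (
    "title: macaroni and cheese<section>"
    "ingredients: macaroni<sep>butter<sep>milk<section>"
    "directions: cook the macaroni<sep>stir in the butter and milk</s>"
)

for section in postprocess(sample, special_tokens).split("\n"):
    section = section.strip()
    if section.startswith("title:"):
        print("[TITLE]:", section.replace("title:", "").strip().capitalize())
    else:
        headline = "INGREDIENTS" if section.startswith("ingredients:") else "DIRECTIONS"
        body = section.split(":", 1)[1]
        section_info = [f" - {i+1}: {info.strip().capitalize()}" for i, info in enumerate(body.split("--"))]
        print(f"[{headline}]:")
        print("\n".join(section_info))
```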
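The new evaluation note says that, for WER, COSIM, and ROUGE, only the best of the five generated recipes per input is kept. A toy illustration of that best-of-n selection, using a plain word-level edit distance as a stand-in WER; the reference and candidate strings below are placeholders, not data from the shared test set.

```python
# Sketch only: "best of 5" scoring for an error metric, here a plain
# word-level edit-distance WER where lower is better.
def word_error_rate(reference, hypothesis):
    ref, hyp = reference.split(), hypothesis.split()
    # Standard Levenshtein distance over words, normalised by reference length.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[len(ref)][len(hyp)] / max(len(ref), 1)

def best_of_n_wer(reference, candidates):
    # Only the best (lowest-error) generation per input counts.
    return min(word_error_rate(reference, c) for c in candidates)

reference = "title: baked macaroni ingredients: macaroni -- butter -- milk"  # placeholder gold recipe
candidates = [f"title: macaroni bake number {i}" for i in range(5)]  # placeholder generations
print(best_of_n_wer(reference, candidates))
```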