qlemesle committed
Commit 13237c8 · 1 Parent(s): f2bbed7

add pplui code

Files changed (2)
  1. CODE +1 -0
  2. parapluie.py +18 -16
CODE ADDED
@@ -0,0 +1 @@
+Subproject commit 3b11ddbfd61e30807beae3a42d09babfa07e0c63
parapluie.py CHANGED
@@ -15,6 +15,7 @@
 
 import evaluate
 import datasets
+from CODE.ParaPLUIE.PPLUIE import ppluie
 
 _CITATION = """\
 @inproceedings{lemesle-etal-2025-paraphrase,
@@ -78,11 +79,7 @@ Examples:
 class Parapluie(evaluate.Metric):
     """TODO: Short description of my evaluation module."""
 
-    # def __init__(self):
-    #     print("is this working ?")
-
     def _info(self):
-        # TODO: Specifies the evaluate.EvaluationModuleInfo object
         return evaluate.MetricInfo(
             # This is the description that will appear on the modules page.
             module_type="metric",
@@ -94,23 +91,28 @@ class Parapluie(evaluate.Metric):
                 'source': datasets.Value("string"),
                 'hypothese': datasets.Value("string"),
             }),
-            # Homepage of the module for documentation
-            # homepage="http://module.homepage",
-            # Additional links to the codebase or references
             codebase_urls=["https://gitlab.inria.fr/expression/paraphrase-generation-evaluation-powered-by-an-llm-a-semantic-metric-not-a-lexical-one-coling-2025"],
-            # reference_urls=["http://path.to.reference.url/new_module"]
         )
 
-    def _download_and_prepare(self, something):
-        # initialisation of metric
-        print("is this working ? is this replacing init ?")
-        print("my arg: ", something)
+    def _download_and_prepare(self, dl_manager):
+        # rewrite of init...
+        self.scorer = None
         pass
 
-    def _compute(self, predictions, references):
+    def init(
+        self,
+        model,
+        device = "cuda:0",
+        template = "FS-DIRECT",
+        use_chat_template = True,
+        half_mode = True,
+        n_right_specials_tokens = 1
+    ):
+        self.scorer = ppluie(model, device, template, use_chat_template, half_mode, n_right_specials_tokens)
+
+    def _compute(self, S, H):
         """Returns the scores"""
-        # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
+        score = self.scorer(S, H)
         return {
-            "accuracy": accuracy,
+            "score": score,
         }
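
Below is a minimal usage sketch of the metric as committed here. It is not part of the commit: the model identifier is a placeholder, instantiating the class directly (rather than through evaluate.load), calling _compute by hand, and passing lists of sentences are all assumptions, since the inputs expected by the ppluie scorer are not documented in this diff.

# Minimal usage sketch (not in the commit). Assumes parapluie.py is importable
# and the CODE submodule providing ParaPLUIE/PPLUIE is checked out.
from parapluie import Parapluie

metric = Parapluie()
# init() builds the ppluie scorer; the model id below is a placeholder, and
# whether a Hugging Face model identifier is accepted is an assumption.
metric.init(model="some-hf-model-id", device="cuda:0", template="FS-DIRECT")

# _compute(S, H) forwards source sentences S and hypotheses H to the scorer;
# passing lists here is an assumption about what ppluie expects.
result = metric._compute(
    S=["Le chat dort sur le canapé."],
    H=["Le chat fait la sieste sur le sofa."],
)
print(result)  # {"score": ...}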