Update README.md
README.md CHANGED
@@ -65,5 +65,6 @@ If you use this model in your work, please cite:
  doi = "10.18653/v1/2025.acl-long.1168",
  pages = "23960--23995",
  ISBN = "979-8-89176-251-0",
+ abstract = "Cross-lingual sentence encoders (CLSE) create fixed-size sentence representations with aligned translations. Current pre-trained CLSE approaches use sentence-level objectives only. This can lead to loss of information, especially for tokens, which then degrades the sentence representation. We propose MEXMA, a novel approach that integrates both sentence-level and token-level objectives. The sentence representation in one language is used to predict masked tokens in another language, with both the sentence representation and all tokens directly updating the encoder. We show that adding token-level objectives greatly improves the sentence representation quality across several tasks. Our approach outperforms current pre-trained cross-lingual sentence encoders on bitext mining as well as several downstream tasks. We also analyse the information encoded in our tokens, and how the sentence representation is built from them."
  }
  ```
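
The added abstract describes MEXMA's core idea: a sentence-level alignment objective combined with cross-lingual masked-token prediction conditioned on the other language's sentence vector, so every token's gradient reaches the encoder. Below is a minimal PyTorch sketch of how those two losses might be combined; `encoder`, `mask_head`, and the loss weighting are hypothetical stand-ins, not the released model's API or the authors' implementation.

```python
# Minimal sketch of a MEXMA-style combined objective (assumptions noted above).
import torch
import torch.nn.functional as F

def mexma_style_loss(encoder, mask_head, src_ids, tgt_ids,
                     tgt_masked_ids, tgt_labels):
    # Hypothetical encoder: each call returns (sentence_vec, token_states),
    # with shapes (B, D) and (B, T, D).
    src_sent, _ = encoder(src_ids)
    tgt_sent, _ = encoder(tgt_ids)

    # Sentence-level objective: pull aligned translations together.
    sent_loss = 1.0 - F.cosine_similarity(src_sent, tgt_sent).mean()

    # Token-level objective: the source sentence vector conditions the
    # prediction of masked tokens on the target side, so gradients from
    # all token predictions flow back into the encoder.
    _, tgt_tok = encoder(tgt_masked_ids)
    logits = mask_head(tgt_tok + src_sent.unsqueeze(1))  # (B, T, V)
    tok_loss = F.cross_entropy(logits.transpose(1, 2), tgt_labels,
                               ignore_index=-100)  # -100 marks unmasked tokens

    return sent_loss + tok_loss
```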
|