Update README.md
README.md
CHANGED
@@ -229,7 +229,7 @@ configs:
 The dataset can be loaded using the command
 ```
 task = "hotpotqa" # it can be any other option like triviaqa,popqa,2wiki, MuSiQue, NaturalQuestions etc.
-load_dataset("
+load_dataset("Salesforce/ContextualBench",task,split="validation")
 ```
 
 ## 2WikiHotpotQA
@@ -340,5 +340,16 @@ TruthfulQA is a benchmark to measure whether a language model is truthful in gen
 
 ## Citation
 
-
+
+```
+@article{nguyen2024sfrrag,
+  title={SFR-RAG: Towards Contextually Faithful LLMs},
+  author={Nguyen, Xuan-Phi and Pandit, Shrey and Purushwalkam, Senthil and Xu, Austin and Chen, Hailin and Ming, Yifei and Ke, Zixuan and Savarese, Silvio and Xiong, Caiming and Joty, Shafiq},
+  year={2024}
+}
+
+```
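For reference, the snippet completed by this change can be run end to end as follows. This is a minimal sketch, assuming the `datasets` library is installed and that each task name listed in the README comment (e.g. `hotpotqa`, `triviaqa`, `popqa`) corresponds to a configuration of `Salesforce/ContextualBench` that exposes a `validation` split; the exact configuration names may differ from the informal list in the comment.

```
# Minimal sketch of the loading command added in this commit.
# Assumes `pip install datasets` and that the task names below are valid
# configurations of Salesforce/ContextualBench (exact names may differ).
from datasets import load_dataset

task = "hotpotqa"  # or another task such as triviaqa, popqa, 2wiki, MuSiQue, NaturalQuestions, etc.
dataset = load_dataset("Salesforce/ContextualBench", task, split="validation")

print(dataset)     # features and number of validation examples
print(dataset[0])  # inspect the first validation example
```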