---
language:
- tr
license: apache-2.0
size_categories:
- 100K<n<1M
task_categories:
- text-retrieval
- question-answering
pretty_name: MS MARCO - Turkish
dataset_info:
- config_name: passages
features:
- name: pid
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 249167997
num_examples: 718217
download_size: 154088206
dataset_size: 249167997
- config_name: queries
features:
- name: qid
dtype: int64
- name: text
dtype: string
splits:
- name: train
num_bytes: 23776232
num_examples: 501428
download_size: 17669620
dataset_size: 23776232
- config_name: qrels
features:
- name: qid
dtype: int64
- name: q0
dtype: int64
- name: pid
dtype: int64
- name: rank
dtype: int64
splits:
- name: train
num_bytes: 17048352
num_examples: 532761
download_size: 6727024
dataset_size: 17048352
- config_name: triples
features:
- name: qid
dtype: int64
- name: pid_pos
dtype: int64
- name: pid_neg
dtype: int64
splits:
- name: train
num_bytes: 24000000
num_examples: 1000000
download_size: 15887012
dataset_size: 24000000
configs:
- config_name: passages
data_files:
- split: train
path: data/collection/train-*
default: true
- config_name: qrels
data_files:
- split: train
path: data/qrels/train-*
- config_name: queries
data_files:
- split: train
path: data/queries/train-*
- config_name: triples
data_files:
- split: train
path: data/triples/train-*
tags:
- msmarco
- passage-retrieval
- text-retrieval
- passage-ranking
- colbert
---
# Dataset Card for "msmarco-tr"

A Turkish version of the MS MARCO passage-ranking dataset. It provides four configurations: `passages` (718,217 passages), `queries` (501,428 queries), `qrels` (532,761 relevance judgments), and `triples` (1,000,000 query/positive-passage/negative-passage training triples).

[More information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)