philipphager committed
Commit
96838dc
1 Parent(s): 57d3da0

Update README.md

Files changed (1)
  1. README.md +50 -14
README.md CHANGED
@@ -19,7 +19,7 @@ The model is available under `model/`.
 from datasets import load_dataset
 
 dataset = load_dataset(
-    "philipphager/baidu-ultr_uva-mlm-ctr",
+    "philipphager/baidu-ultr_baidu-mlm-ctr",
     name="clicks",
     split="train", # ["train", "test"]
     cache_dir="~/.cache/huggingface",
@@ -33,7 +33,7 @@ dataset.set_format("torch") # [None, "numpy", "torch", "tensorflow", "pandas",
 from datasets import load_dataset
 
 dataset = load_dataset(
-    "philipphager/baidu-ultr_uva-mlm-ctr",
+    "philipphager/baidu-ultr_baidu-mlm-ctr",
     name="annotations",
     split="test",
     cache_dir="~/.cache/huggingface",
@@ -50,16 +50,32 @@ Each row of the click / annotation dataset contains the following attributes. Us
 |------------------------------|----------------|-------------|
 | query_id                     | string         | Baidu query_id |
 | query_md5                    | string         | MD5 hash of query text |
-| url_md5                      | List[string]   | MD5 hash of document url, most reliable document identifier |
+| query                        | List[int32]    | List of query tokens |
+| query_length                 | int32          | Number of query tokens |
+| n                            | int32          | Number of documents for current query, useful for padding |
+| url_md5                      | List[string]   | MD5 hash of document URL, most reliable document identifier |
 | text_md5                     | List[string]   | MD5 hash of document title and abstract |
-| query_document_embedding     | Tensor[float16]| BERT CLS token |
+| title                        | List[List[int32]] | List of tokens for document titles |
+| abstract                     | List[List[int32]] | List of tokens for document abstracts |
+| query_document_embedding     | Tensor[Tensor[float16]] | BERT CLS token |
 | click                        | Tensor[int32]  | Click / no click on a document |
-| n                            | int32          | Number of documents for current query, useful for padding |
 | position                     | Tensor[int32]  | Position in ranking (does not always match original item position) |
-| media_type                   | Tensor[int32]  | Document type (label encoding recommended as ids do not occupy a continous integer range) |
-| displayed_time               | Tensor[float32]| Seconds a document was displayed on screen |
-| serp_height                  | Tensor[int32]  | Pixel height of a document on screen |
-| slipoff_count_after_click    | Tensor[int32]  | Number of times a document was scrolled off screen after previously clicking on it |
+| media_type                   | Tensor[int32]  | Document type (label encoding recommended as IDs do not occupy a continuous integer range) |
+| displayed_time               | Tensor[float32]| Seconds a document was displayed on the screen |
+| serp_height                  | Tensor[int32]  | Pixel height of a document on the screen |
+| slipoff_count_after_click    | Tensor[int32]  | Number of times a document was scrolled off the screen after previously clicking on it |
+| bm25                         | Tensor[float32] | BM25 score for documents |
+| bm25_title                   | Tensor[float32] | BM25 score for document titles |
+| bm25_abstract                | Tensor[float32] | BM25 score for document abstracts |
+| tf_idf                       | Tensor[float32] | TF-IDF score for documents |
+| tf                           | Tensor[float32] | Term frequency for documents |
+| idf                          | Tensor[float32] | Inverse document frequency for documents |
+| ql_jelinek_mercer_short      | Tensor[float32] | Query likelihood score for documents using Jelinek-Mercer smoothing (alpha = 0.1) |
+| ql_jelinek_mercer_long       | Tensor[float32] | Query likelihood score for documents using Jelinek-Mercer smoothing (alpha = 0.7) |
+| ql_dirichlet                 | Tensor[float32] | Query likelihood score for documents using Dirichlet smoothing (lambda = 128) |
+| document_length              | Tensor[int32]  | Length of documents |
+| title_length                 | Tensor[int32]  | Length of document titles |
+| abstract_length              | Tensor[int32]  | Length of document abstracts |
 
 
 ### Expert annotation dataset
@@ -67,11 +83,28 @@ Each row of the click / annotation dataset contains the following attributes. Us
 |------------------------------|----------------|-------------|
 | query_id                     | string         | Baidu query_id |
 | query_md5                    | string         | MD5 hash of query text |
-| text_md5                     | List[string]   | MD5 hash of document title and abstract |
-| query_document_embedding     | Tensor[float16]| BERT CLS token |
-| label                        | Tensor[int32]  | Relevance judgment on a scale from 0 (bad) to 4 (excellent) |
-| n                            | int32          | Number of documents for current query, useful for padding |
+| query                        | List[int32]    | List of query tokens |
+| query_length                 | int32          | Number of query tokens |
 | frequency_bucket             | int32          | Monthly frequency of query (bucket) from 0 (high frequency) to 9 (low frequency) |
+| n                            | int32          | Number of documents for current query, useful for padding |
+| url_md5                      | List[string]   | MD5 hash of document URL, most reliable document identifier |
+| text_md5                     | List[string]   | MD5 hash of document title and abstract |
+| title                        | List[List[int32]] | List of tokens for document titles |
+| abstract                     | List[List[int32]] | List of tokens for document abstracts |
+| query_document_embedding     | Tensor[Tensor[float16]] | BERT CLS token |
+| label                        | Tensor[int32]  | Relevance judgments on a scale from 0 (bad) to 4 (excellent) |
+| bm25                         | Tensor[float32] | BM25 score for documents |
+| bm25_title                   | Tensor[float32] | BM25 score for document titles |
+| bm25_abstract                | Tensor[float32] | BM25 score for document abstracts |
+| tf_idf                       | Tensor[float32] | TF-IDF score for documents |
+| tf                           | Tensor[float32] | Term frequency for documents |
+| idf                          | Tensor[float32] | Inverse document frequency for documents |
+| ql_jelinek_mercer_short      | Tensor[float32] | Query likelihood score for documents using Jelinek-Mercer smoothing (alpha = 0.1) |
+| ql_jelinek_mercer_long       | Tensor[float32] | Query likelihood score for documents using Jelinek-Mercer smoothing (alpha = 0.7) |
+| ql_dirichlet                 | Tensor[float32] | Query likelihood score for documents using Dirichlet smoothing (lambda = 128) |
+| document_length              | Tensor[int32]  | Length of documents |
+| title_length                 | Tensor[int32]  | Length of document titles |
+| abstract_length              | Tensor[int32]  | Length of document abstracts |
 
 ## Example PyTorch collate function
 Each sample in the dataset is a single query with multiple documents.
@@ -95,7 +128,9 @@ def collate_clicks(samples: List):
         batch["n"].append(sample["n"])
 
     return {
-        "query_document_embedding": pad_sequence(batch["query_document_embedding"], batch_first=True),
+        "query_document_embedding": pad_sequence(
+            batch["query_document_embedding"], batch_first=True
+        ),
         "position": pad_sequence(batch["position"], batch_first=True),
         "click": pad_sequence(batch["click"], batch_first=True),
         "n": torch.tensor(batch["n"]),
@@ -103,3 +138,4 @@ def collate_clicks(samples: List):
 
 loader = DataLoader(dataset, collate_fn=collate_clicks, batch_size=16)
 ```
+
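The two loading snippets change only in the repository id. A minimal sanity check that the renamed id resolves for both configs, assuming a standard `datasets` installation; the config names, splits, and cache path are copied from the diff above:

```python
from datasets import load_dataset

# Both configs now live under the renamed repository id.
clicks = load_dataset(
    "philipphager/baidu-ultr_baidu-mlm-ctr",
    name="clicks",
    split="train",
    cache_dir="~/.cache/huggingface",
)
annotations = load_dataset(
    "philipphager/baidu-ultr_baidu-mlm-ctr",
    name="annotations",
    split="test",
    cache_dir="~/.cache/huggingface",
)

print(clicks)       # features and number of rows in the click log
print(annotations)  # features and number of rows in the expert annotations
```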
 
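The click table recommends a label encoding for `media_type`, since the raw ids do not form a contiguous range. A minimal sketch of such an encoding; the dict-based encoder and variable names are illustrative, only the column name comes from the table:

```python
from datasets import load_dataset

dataset = load_dataset(
    "philipphager/baidu-ultr_baidu-mlm-ctr",
    name="clicks",
    split="train",
    cache_dir="~/.cache/huggingface",
)

# Scan the split once and map the sparse raw ids to a dense 0..K-1
# range, so they can directly index an embedding table.
unique_ids = sorted({t for query in dataset["media_type"] for t in query})
encode = {raw: dense for dense, raw in enumerate(unique_ids)}

first_query = dataset[0]
encoded = [encode[t] for t in first_query["media_type"]]
```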
 
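The newly documented lexical features (the BM25 variants, TF-IDF, term statistics, and query-likelihood scores) are listed per document, so they can be stacked into a feature matrix, e.g. for a classical learning-to-rank baseline. A sketch under the assumption that each of these columns holds one float per document of a query:

```python
import numpy as np
from datasets import load_dataset

dataset = load_dataset(
    "philipphager/baidu-ultr_baidu-mlm-ctr",
    name="clicks",
    split="train",
    cache_dir="~/.cache/huggingface",
)

LEXICAL_FEATURES = [
    "bm25", "bm25_title", "bm25_abstract", "tf_idf", "tf", "idf",
    "ql_jelinek_mercer_short", "ql_jelinek_mercer_long", "ql_dirichlet",
]

query = dataset[0]
# One row per document of the query, one column per lexical feature.
X = np.column_stack([query[name] for name in LEXICAL_FEATURES])
clicks = np.asarray(query["click"])  # binary implicit feedback per document
print(X.shape, clicks.shape)
```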
 
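On the annotation side, `label` holds graded judgments and `frequency_bucket` places each query in a popularity bucket, which makes it easy to evaluate head and tail queries separately. A sketch using the standard `datasets` filter API; the bucket thresholds are arbitrary examples:

```python
from datasets import load_dataset

dataset = load_dataset(
    "philipphager/baidu-ultr_baidu-mlm-ctr",
    name="annotations",
    split="test",
    cache_dir="~/.cache/huggingface",
)

# Bucket 0 holds the most frequent queries, bucket 9 the least frequent.
head = dataset.filter(lambda query: query["frequency_bucket"] <= 2)
tail = dataset.filter(lambda query: query["frequency_bucket"] >= 7)

query = dataset[0]
print(query["label"])  # graded relevance per document, 0 (bad) to 4 (excellent)
print(len(head), len(tail))
```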
 
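The reformatted collate function is behavior-identical: it pads the per-query tensors to the longest query in the batch and keeps the true document counts in `n`. A quick shape check, reusing the `loader` from the snippet above; the embedding dimension is whatever the stored BERT CLS vectors have:

```python
batch = next(iter(loader))

# All per-document tensors are padded to the longest query in the batch.
print(batch["query_document_embedding"].shape)  # (batch_size, max_docs, embedding_dim)
print(batch["position"].shape)                  # (batch_size, max_docs)
print(batch["click"].shape)                     # (batch_size, max_docs)
print(batch["n"])                               # true document counts, before padding
```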