mauricett committed on
Commit 315cb18
1 Parent(s): 92dd194

Update README.md

Files changed (1)
  1. README.md +39 -25
README.md
@@ -86,39 +86,41 @@ def preprocess(example, useful_fn):
<br>
<br>
<br>
+
# Complete Example

```py
import random
- import datasets
+ from datasets import load_dataset
+ from torch.utils.data import DataLoader

- # Shuffle and apply your own preprocessing.
- dataset = dataset.shuffle(seed=42)
- dataset = dataset.map(preprocess, fn_kwargs={'tokenizer': tokenizer})
- ```
-
- For a quick working example, you can try to use the following:
- ```py
- # A mock tokenizer and preprocess function for demonstration.
+ # A mock tokenizer and functions for demonstration.
class Tokenizer:
+     def __init__(self):
+         pass
    def __call__(self, example):
        return example

- def preprocess(example, useful_fn):
+ def score_fn(score):
+     # Transform Stockfish score and terminal outcomes.
+     return score
+
+ def preprocess(example, tokenizer, score_fn):
    # Get number of moves made in the game.
-     max_ply = len(example['moves'])
-     pick_random_move = random.randint(0, max_ply)
+     max_ply = len(example['moves'])
+     pick_random_move = random.randint(0, max_ply-1)

-     # Get the FEN, move and score for our random choice.
-     fen = example['fens'][pick_random_move]
-     move = example['moves'][pick_random_move]
-     score = example['scores'][pick_random_move]
+     # Get the FEN, move and score for our random choice.
+     fen = example['fens'][pick_random_move]
+     move = example['moves'][pick_random_move]
+     score = example['scores'][pick_random_move]
+
+     # Transform data into the format of your choice.
+     example['fens'] = tokenizer(fen)
+     example['moves'] = tokenizer(move)
+     example['scores'] = score_fn(score)
+     return example

-     # Transform data into the format of your choice.
-     example['fens'] = useful_fn(fen)
-     example['moves'] = useful_fn(move)
-     example['scores'] = useful_fn(score)
-     return example

tokenizer = Tokenizer()

@@ -128,11 +130,23 @@ dataset = load_dataset(path="mauricett/lichess_sf",
                       streaming=True,
                       trust_remote_code=True)

+
# Shuffle and apply your own preprocessing.
dataset = dataset.shuffle(seed=42)
- dataset = dataset.map(preprocess, fn_kwargs={'tokenizer': tokenizer})
+ dataset = dataset.map(preprocess, fn_kwargs={'tokenizer': tokenizer,
+                                              'score_fn': score_fn})
+
+ # PyTorch dataloader
+ dataloader = DataLoader(dataset, batch_size=256, num_workers=4)
+
+
+ n = 0
+ for batch in dataloader:
+
+     # do stuff

- for batch in dataset:
-     # do stuff
-     break
+     n += 1
+     print(n)
+     if n == 50:
+         break
```
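
Both `Tokenizer` and `score_fn` in the updated example are deliberate pass-throughs. A minimal sketch of what real replacements could look like is below; it is not part of this commit or the dataset card. The class name `CharTokenizer`, its vocabulary and `max_len`, and the helper `squash_score` with its 400-centipawn scaling are illustrative assumptions, and the dataset's actual encoding of mate scores and terminal outcomes would still need to be mapped to numbers before the squashing step.

```py
import math

# Hypothetical character-level tokenizer over the FEN/UCI alphabet.
# Id 0 is reserved for padding so sequences can be batched.
class CharTokenizer:
    def __init__(self, max_len=90):
        chars = sorted(set("PNBRQKpnbrqk0123456789/abcdefgh w-"))
        self.stoi = {c: i + 1 for i, c in enumerate(chars)}
        self.max_len = max_len

    def __call__(self, text):
        ids = [self.stoi[c] for c in text][:self.max_len]
        # Pad to a fixed length so the DataLoader can stack examples.
        return ids + [0] * (self.max_len - len(ids))

# Hypothetical score transform: squash a centipawn evaluation into (0, 1)
# with a logistic curve. Assumes `score` is already numeric.
def squash_score(score):
    return 1.0 / (1.0 + math.exp(-float(score) / 400.0))
```

Either stand-in can be passed through `fn_kwargs` exactly like the mocks in the example above.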