<!DOCTYPE html>
<html>
<head>
    <script src="distill.bundle.js" type="module" fetchpriority="high" blocking></script>
    <script src="main.bundle.js" type="module" fetchpriority="low" defer></script>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta charset="utf8">
    <base target="_blank">
    <title>Scaling test-time compute for open models: How we implemented DeepMind’s compute-optimal recipe to solve hard math problems like OpenAI’s o1</title>
    <style>
        figure {
            max-width: 100%;
            overflow-x: auto;
            -webkit-overflow-scrolling: touch; /* Smooth scrolling on iOS */
        }

        figure img {
            max-width: none; /* Allows image to maintain original size */
            height: auto;
        }
    </style>
    <link rel="stylesheet" href="style.css">
    <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
</head>

<body>
<d-front-matter>
    <script id='distill-front-matter' type="text/json">{
    "title": "📈 Scaling test-time compute with open models",
    "description": "This blog presents our work on scaling test-time compute for large language models, we share an open implementation of DeepMind's Scaling Test Time Compute paper and introduce a new method; diverse verifier tree search (DVTS) that improves scaling performance.",
    "published": "Dec 16, 2024",
    "affiliation": {"name": "Hugging Face"},
    "authors": [
      {
        "author":"Edward Beeching",
        "authorURL":"https://huggingface.co/edbeeching"
      },
      {
        "author":"Lewis Tunstall",
        "authorURL":"https://huggingface.co/lewtun"
      },
      {
        "author":"Sasha Rush",
        "authorURL":"https://huggingface.co/srush"
      }
    ]
    }</script>
</d-front-matter>

<d-title>
    <h1 class="l-page" style="text-align: center; display: none;">Scaling test-time compute</h1>
    <div id="title-plot" class="main-plot-container l-page">
        <figure>
            <img src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/banner.gif" alt="Scaling test-time compute with open models">
        </figure>
    </div>
</d-title>
<d-byline></d-byline>
<d-article>
    <d-contents>
    </d-contents>

    <!-- INTRODUCTION -->
    <p>Over the last few years, the scaling of <em><strong>train-time compute</strong></em> has dominated the progress of large language models (LLMs).<d-footnote>Here, train-time compute refers to increasing model size, dataset size, and compute budgets in line with <a href="https://huggingface.co/papers/2001.08361">scaling laws.</a></d-footnote> Although this paradigm has proven to be remarkably effective, the resources needed to pretrain ever larger models are becoming prohibitively expensive, with <a href="https://youtu.be/WXhikNA5PIc?feature=shared">billion-dollar clusters</a> already on the horizon.<d-footnote>Aside from compute resources, Ilya Sutskever has made the <a href="https://www.youtube.com/watch?feature=shared&t=475&v=1yvBqasHLZs">provocative analogy</a> that pretraining data is the “fossil fuel of AI” and that pretraining as we know it will end once this resource is exhausted in the near future.</d-footnote> This trend has sparked significant interest in a complementary approach: <em><strong>test-time compute scaling</strong></em>. Rather than relying on ever-larger pretraining budgets, test-time methods use dynamic inference strategies that allow models to “think longer” on harder problems. A prominent example is <a href="https://openai.com/index/learning-to-reason-with-llms/">OpenAI’s o1 model</a>, which shows consistent improvement on difficult math problems as one increases the amount of test-time compute:</p>

    <figure id="1581384e-bcac-805f-8c2b-dff4509f45cb" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/compute.png.webp"><img style="width:672px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/compute.png.webp"/></a></figure>

    <p id="1591384e-bcac-80ce-a40c-cf00e5a73720" class="">Although we don’t know how o1 was trained, recent research from DeepMind<d-cite key="gdm"></d-cite> shows that <em><strong>test-time compute can be scaled</strong></em> <em><strong>optimally</strong></em> through strategies like iterative self-refinement or using a reward model to perform search over the space of solutions. By adaptively allocating test-time compute per prompt, smaller models can rival—and sometimes even outperform—their larger, more resource-intensive counterparts. Scaling test-time compute is especially advantageous when memory is constrained and the available hardware is not sufficient to run a larger model. However, this promising approach was demonstrated with closed-source models, and no implementation details or code were released 😢.</p>

    <p id="1591384e-bcac-80c1-82a7-cc9a41f9fdc4" class="">Over the past months we’ve been diving deep in trying to reverse engineer and reproduce several of these results and are finally happy to share some of our knowledge. More precisely, in this blog post we’ll cover:</p>

    <ul>
        <li><strong>Compute-optimal scaling: </strong>How we implemented DeepMind's recipe to boost the mathematical capabilities of open models at test-time.</li>
        <li><strong>Diverse Verifier Tree Search (DVTS):</strong> An unpublished extension we developed to the verifier-guided tree search technique. This simple yet effective method improves diversity and delivers better performance, particularly at large test-time compute budgets.</li>
        <li>🧭 <strong>Search and Learn: </strong>A lightweight toolkit for implementing search strategies with LLMs and built for speed with vLLM. You can check it out <a href="https://github.com/huggingface/search-and-learn">here</a>.</li>
    </ul>

    <p id="15a1384e-bcac-804b-9470-f64abc63b648" class="">So how well does compute-optimal scaling work in practice? Check out this plot where the tiny 1B and 3B Llama Instruct models outperform their much larger 8B and 70B siblings on the challenging MATH-500 benchmark if you give them enough “time to think” 🤯:</p>

    <figure id="15b1384e-bcac-809d-9f85-ff0b5a83e1cc" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-thumbnail.png"><img style="width:707.9921875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-thumbnail.png"/></a></figure>

    <p id="15c1384e-bcac-804f-a4d5-c8f760f28096" class="">In the rest of this blog post, we’ll dive deep into the ingredients behind results like this one and walk you through practical strategies for implementing test-time compute scaling.</p>

    <!-- SECTION 1 -->
    <h2 id="1591384e-bcac-809f-b7ce-d414b4c0df4e" class="">Strategies for test-time compute scaling</h2>

    <p id="1591384e-bcac-8021-a784-d3340af0adb4" class="">There are two main strategies for scaling test-time compute:</p>

    <ul>
        <li><strong>Self-Refinement: </strong>Models iteratively refine their own outputs or “thoughts” by identifying and correcting errors in subsequent iterations. While effective on some tasks, this strategy usually requires models to have built-in mechanisms for self-refinement, which can limit its applicability.</li>
        <li><strong>Search Against a Verifier: </strong>This approach focuses on generating multiple candidate answers and using a verifier to select the best one. A verifier can be anything from a hard-coded heuristic to a learned reward model, but for the purposes of this blog post we will focus on learned verifiers. It includes techniques such as Best-of-N sampling<d-footnote>This is also known as rejection sampling.</d-footnote> and tree search. Search strategies are more flexible and can adapt to the difficulty of the problem, although their performance is constrained by the quality of the verifier.</li>
    </ul>

    <p id="1591384e-bcac-801d-82e3-d2dc50cf2b24" class="">In this blog post, we’ll concentrate on search-based methods as they represent a practical and scalable solution for test-time compute optimization. In particular, we’ll examine the three strategies illustrated below:</p>

    <figure id="15b1384e-bcac-80df-a57e-e08ddb80ec8c" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/search-strategies.png"><img style="width:900px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/search-strategies.png"/></a>
        <figcaption>Illustration of various search strategies used in test-time compute scaling. Figure adapted from the DeepMind paper.<d-cite key="gdm"></d-cite></figcaption>
    </figure>

    <ul>
        <li><strong>Best-of-N: </strong>Generate multiple responses per problem and assign scores to each candidate answer, typically using a reward model. Then select the answer with the highest reward (or a weighted variant discussed later). This approach emphasizes answer quality over frequency.</li>
        <li><strong>Beam search: </strong>A systematic search method that explores the solution space, often combined with a <em>process reward model (PRM)</em><d-cite key="prm"></d-cite> to optimise both the sampling and evaluation of intermediate steps in problem-solving. Unlike conventional reward models that produce a single score on the final answer, PRMs provide a <em>sequence </em>of scores, one for each step of the reasoning process. This ability to provide fine-grained feedback makes PRMs a natural fit for search methods with LLMs.</li>
        <li><strong>Diverse verifier tree search (DVTS):</strong> An extension of beam search we developed that splits the initial beams into independent subtrees, which are then expanded greedily using a PRM.<d-footnote>DVTS is similar to <a href="https://huggingface.co/papers/1610.02424">diverse beam search (DBS)</a> with the main difference that beams share a common prefix in DBS and no sampling is used. DVTS is also similar to <a href="https://huggingface.co/papers/2306.09896">code repair trees</a>, although it is not restricted to code generation models and discrete verifiers. After the publication of this blog post, we were made aware of <a href="https://huggingface.co/papers/2405.03553">step-level beam search</a>, which is most similar to DVTS and uses a value head to predict the most promising steps instead of a PRM.</d-footnote>  This method improves solution diversity and overall performance, particularly with larger test-time compute budgets.</li>
    </ul>

    <p id="15a1384e-bcac-803c-bc89-ed15f18eafdc" class="">With an understanding of the key search strategies, let’s move on to how we evaluated them in practice.</p>

    <h2 id="15a1384e-bcac-80e2-8506-e130c2c69407" class="">Experimental setup</h2>

    <figure id="15c1384e-bcac-8096-9f01-f2c0d388ed1d" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/system.png"><img style="width:750px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/system.png"/></a></figure>

    <p id="15a1384e-bcac-802a-b133-e279ff948bd0" class="">As illustrated in the diagram above, our experimental setup involves a pipeline with the following steps:</p>

    <ol>
        <li>We begin by feeding a math problem to an LLM, which generates \(N\) <em><strong>partial solutions</strong></em>, e.g. an intermediate step in a derivation.</li>
        <li>Each step is scored by a PRM, which estimates the probability that each step will eventually reach the correct final answer. The steps and PRM scores are then used by a given search strategy to select which partial solutions should be further explored to generate the next round of intermediate steps.</li>
        <li>Once the search strategy terminates, the final candidate solutions are ranked by the PRM to produce the final answer.</li>
    </ol>
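
    <p>To make this pipeline concrete, here is a minimal sketch of the generate / score / select loop. The <code>llm</code>, <code>prm</code>, and <code>strategy</code> objects are hypothetical placeholders (not from any specific library), and details like batching and step termination are omitted:</p>

    <d-code block language="python">
def run_search(problem, llm, prm, strategy, max_iterations=40):
    """Hypothetical sketch of the pipeline above; not the exact code we ran."""
    beams = [""]  # start from a single empty partial solution
    for _ in range(max_iterations):
        # 1. Sample candidate next steps for each active partial solution.
        candidates = [beam + step for beam in beams for step in llm.sample_steps(problem, beam)]
        # 2. Score every partial solution with the PRM.
        scores = [prm.score(problem, candidate) for candidate in candidates]
        # 3. The search strategy picks which partial solutions to expand next.
        beams = strategy.select(candidates, scores)
        # Stop once every surviving beam is a complete solution.
        if all(llm.is_complete(beam) for beam in beams):
            break
    # 4. Rank the final candidates with the PRM to produce the answer.
    return max(beams, key=lambda beam: prm.score(problem, beam))
    </d-code>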

    <p id="15c1384e-bcac-80af-a31e-fd3330874674" class="">To compare various search strategies, we used the following open models and datasets:</p>

    <ul>
        <li><strong>Model:</strong> We used <code>meta-llama/Llama-3.2-1B-Instruct</code> as our primary model for scaling test-time compute. With 1B parameters, its lightweight nature enables fast iterations, and its unsaturated performance on math benchmarks makes it an ideal choice for highlighting the benefits of scaling.</li>
        <li><strong>Process reward model: </strong>To guide our search strategies, we used <code>RLHFlow/Llama3.1-8B-PRM-Deepseek-Data</code>, an 8B reward model that has been trained using <em>process supervision</em>. Process supervision is a training approach where models receive feedback on each step of their reasoning process, not just the final outcome. We picked this model since it belongs to the same model family as our policy and gave better results than other PRMs like <a href="https://huggingface.co/peiyi9979/math-shepherd-mistral-7b-prm">Math-Shepherd</a> we tested in this weight class.</li>
        <li><strong>Dataset: </strong>We evaluated on the<a href="https://huggingface.co/datasets/HuggingFaceH4/MATH-500"> MATH-500 subset</a> of the MATH benchmark,<d-cite key="math"></d-cite> a dataset released by OpenAI as part of their research<d-cite key="lightman2023letsverifystepstep"></d-cite> on process supervision. These math problems span seven subjects and are challenging for both humans and most LLMs. Take a look at the dataset viewer below to get a taste for the problem difficulty!</li>
    </ul>

    <iframe src="https://huggingface.co/datasets/HuggingFaceH4/MATH-500/embed/viewer/default/test" frameborder="0" width="100%" height="560px"></iframe>

    <aside>There are signs this benchmark is getting saturated by recent models like o1. This has prompted the creator of MATH (Dan Hendrycks) to start collecting a diverse set of extremely difficult problems as part of <a href="https://agi.safe.ai/submit">Humanity’s Last Exam.</a></aside>

    <p>We tested each search strategy across compute budgets ranging from 1 to 256 generations per prompt and ran the data-generation pipeline with five random seeds to estimate variance across runs. You can find the models and datasets from our analysis in this <a href="https://huggingface.co/collections/HuggingFaceH4/scaling-test-time-compute-with-open-models-675c3b475a0d6eb4528fec23">Hugging Face collection</a>.</p>

    <p>To warm up, we’ll begin with a simple baseline and progressively incorporate additional techniques to improve performance.</p>

    <!-- SECTION 2 -->
    <h2 id="1591384e-bcac-801a-9201-cd4f3b8dfe96" class="">Majority voting: a simple baseline</h2>

    <p>Majority voting—or self-consistency decoding<d-cite key="cot"></d-cite> if you want to be fancy—is the most straightforward method to aggregate an LLM’s outputs.<d-footnote>It’s also the most common sampling method used in the literature and is usually referred to as “maj@X” in tables and results.</d-footnote> As the name suggests, for a given math problem we generate \(N\) candidate solutions and pick the most frequent answer. For all our experiments we sampled up to \(N=256\) candidates with temperature \(T=0.8\) and generated up to 2048 tokens per problem.<d-footnote>We found that sampling with \(T=1.0\) would cause the model to generate Chinese characters midway through a solution and hurt performance.</d-footnote></p>
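
    <p>As a point of reference, here is a minimal sketch of how the candidates can be sampled with vLLM. This is illustrative rather than the exact code from our pipeline, and the choice of system prompt matters a lot, as we discuss next:</p>

    <d-code block language="python">
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

model_id = "meta-llama/Llama-3.2-1B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
llm = LLM(model=model_id)

def sample_candidates(problem: str, system_prompt: str, n: int = 256) -> list[str]:
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": problem},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Sample n candidates at T=0.8 with up to 2048 new tokens each.
    sampling_params = SamplingParams(n=n, temperature=0.8, max_tokens=2048)
    outputs = llm.generate([prompt], sampling_params)
    return [completion.text for completion in outputs[0].outputs]
    </d-code>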

    <p id="15c1384e-bcac-8086-a0e7-e0ca93b5ea94" class="">One quirk with the MATH benchmark is that answers must be formatted in a LaTeX box like <code>\boxed{answer}</code> . We initially tried the following simple system prompt for Llama 3.2 1B</p>

    <script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js" integrity="sha512-7Z9J3l1+EYfeaPKcGXu3MS/7T+w19WtKQY/n+xzmw4hZhJ9tyYmcUS+4QqAlzhicE5LAfMQSF3iFTK9bQdTxXg==" crossorigin="anonymous" referrerPolicy="no-referrer"></script><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css" integrity="sha512-tN7Ec6zAFaVSG3TpNAKtk4DOHNpSwKHxxrsiw4GHKESGPs5njn/0sMCUMl2svV4wo4BK/rCP7juYz+zx+l6oeQ==" crossorigin="anonymous" referrerPolicy="no-referrer"/><pre id="15c1384e-bcac-8042-9b96-ff3d615bb9f0" class="code"><code class="language-Python">Please think step by step and put your final answer within \boxed{}.</code></pre>

    <p id="15c1384e-bcac-80d0-bca6-ffed38482a37" class="">but found the resulting accuracy with greedy decoding (\(T=0\)) to be far worse than the 30.6% that Meta reported in their <a href="https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/">release</a>. Luckily, Meta also <a href="https://huggingface.co/datasets/meta-llama/Llama-3.2-1B-Instruct-evals/viewer/Llama-3.2-1B-Instruct-evals__math__details">published</a> the prompts they used for their evals and switching our system prompt to theirs made all the difference:</p>

    <aside>We wish more AI labs followed this practice - it helps enormously with reproducibility. Props to Meta!</aside>

    <script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js" integrity="sha512-7Z9J3l1+EYfeaPKcGXu3MS/7T+w19WtKQY/n+xzmw4hZhJ9tyYmcUS+4QqAlzhicE5LAfMQSF3iFTK9bQdTxXg==" crossorigin="anonymous" referrerPolicy="no-referrer"></script><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css" integrity="sha512-tN7Ec6zAFaVSG3TpNAKtk4DOHNpSwKHxxrsiw4GHKESGPs5njn/0sMCUMl2svV4wo4BK/rCP7juYz+zx+l6oeQ==" crossorigin="anonymous" referrerPolicy="no-referrer"/><pre id="15c1384e-bcac-8011-aee6-d7c433df8b5f" class="code"><code class="language-Python">Solve the following math problem efficiently and clearly:

    - For simple problems (2 steps or fewer):
    Provide a concise solution with minimal explanation.

    - For complex problems (3 steps or more):
    Use this step-by-step format:

    ## Step 1: [Concise description]
    [Brief explanation and calculations]

    ## Step 2: [Concise description]
    [Brief explanation and calculations]

    ...

    Regardless of the approach, always conclude with:

    Therefore, the final answer is: $\boxed{answer}$. I hope it is correct.

    Where [answer] is just the final number or expression that solves the problem.</code></pre>

    <p id="15c1384e-bcac-8076-9664-caa1b098c89c" class="">One subtlety with evaluating answers to math problems is that strings like \(1/\sqrt{3}\) and \(\sqrt{3}/3\) are distinct, but represent mathematically equivalent answers. The standard way<d-cite key="minerva"></d-cite> to handle this is to convert the a pair of answers to SymPy objects and then check whether subtracting the two objects and applying <code>sympy.simplify</code> gives zero. </p><p id="15c1384e-bcac-8043-a1d3-ffa0c5c3406e" class="">While this approach works well when comparing a small number of candidate answers, we found it was terribly slow when comparing many pairs in a list of \(N\) candidates; in some cases, slower than generating the candidates in the first place!  To deal with this, we first reduced each answer to its <a href="https://en.wikipedia.org/wiki/Canonical_form">canonical form</a> and then computed the frequency of each form to determine the majority vote. Expand the detail below if you’re curious about how the code looks.</p>

    <details><summary style="font-weight:600;font-size:1.25em;line-height:1.3;margin:0"><strong>Implementation detail</strong></summary><div class="indented"><p id="15d1384e-bcac-8080-b676-e09848694520" class="">To obtain the canonical form of an algebraic expression, we first convert the LaTeX string to SymPy, apply <code>sympy.simplify</code>, and finally convert back to LaTeX: </p>
        <script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js" integrity="sha512-7Z9J3l1+EYfeaPKcGXu3MS/7T+w19WtKQY/n+xzmw4hZhJ9tyYmcUS+4QqAlzhicE5LAfMQSF3iFTK9bQdTxXg==" crossorigin="anonymous" referrerPolicy="no-referrer"></script><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css" integrity="sha512-tN7Ec6zAFaVSG3TpNAKtk4DOHNpSwKHxxrsiw4GHKESGPs5njn/0sMCUMl2svV4wo4BK/rCP7juYz+zx+l6oeQ==" crossorigin="anonymous" referrerPolicy="no-referrer"/><pre id="15b1384e-bcac-80c0-96c2-feba0d1cc92e" class="code"><code class="language-Python">from latex2sympy2 import latex2sympy
    from sympy import latex, simplify
    from collections import defaultdict  # used by find_majority_answer below

    def get_canonical_form(expression: str) -&gt; str:
        parsed_expr = latex2sympy(expression)
        simplified_expr = simplify(parsed_expr)
        return latex(simplified_expr)</code></pre><p id="15c1384e-bcac-80ea-92cd-d199d25281b4" class="">With this function, we can then iterate over all candidate solutions in an list and keep track of how many times a canonical form has been seen before computing the final majority vote:</p><script src="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/prism.min.js" integrity="sha512-7Z9J3l1+EYfeaPKcGXu3MS/7T+w19WtKQY/n+xzmw4hZhJ9tyYmcUS+4QqAlzhicE5LAfMQSF3iFTK9bQdTxXg==" crossorigin="anonymous" referrerPolicy="no-referrer"></script><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css" integrity="sha512-tN7Ec6zAFaVSG3TpNAKtk4DOHNpSwKHxxrsiw4GHKESGPs5njn/0sMCUMl2svV4wo4BK/rCP7juYz+zx+l6oeQ==" crossorigin="anonymous" referrerPolicy="no-referrer"/><pre id="15d1384e-bcac-80f3-9062-c4f44a0d0b8b" class="code"><code class="language-Python">def find_majority_answer(answers: list[str]) -&gt; str:
        canonical_groups = defaultdict(int)
        canonical_to_original = {}

        for answer in answers:
            canonical_form = get_canonical_form(answer)

            # Increment count for the canonical form
            canonical_groups[canonical_form] += 1

            # Track the original answer for this canonical form
            if canonical_form not in canonical_to_original:
                canonical_to_original[canonical_form] = answer

        # Find the canonical form with the largest count
        max_count = max(canonical_groups.values())
        for canonical_form, count in canonical_groups.items():
            if count == max_count:
                # Return the first occurring group in case of a tie
                return canonical_to_original[canonical_form]</code></pre>

    <p id="15d1384e-bcac-804e-a99c-fe5e83313a3d" class="">This approach was significantly faster than checking each pair of solutions independently for equality.</p></div></details>

    <br><br>

    <p id="15b1384e-bcac-80f7-83e8-e1d6b360faa4" class="">Here’s how majority voting performs when applied to the generations from Llama 3.2 1B Instruct:</p><figure id="15b1384e-bcac-8072-9987-d80031b97793" class="image"><a href="Scaling%20test-time%20compute%20with%20open%20models%201531384ebcac800b9d73fca3503eb783/methods-maj.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-maj.png"/></a></figure><p id="15b1384e-bcac-8020-8688-fe1713e92c2b" class="">The results show that majority voting yields a significant improvement over the greedy decoding baseline, but its gains start to plateau after approximately \(N=64\) generations. This limitation arises because majority voting struggles with problems that require nuanced reasoning or tasks where errors are consistent across generations. If you’re also wondering why the majority voting accuracy is worse than the 0-shot CoT baseline for \(N=1\) and \(2\), that’s because we sample at \(T=0.8\), which makes it less likely we produce the correct answer among a handful of candidates.</p><p id="15b1384e-bcac-8075-8fef-f26f0b8e5559" class="">Building on the limitations of majority voting, let’s see how incorporating a reward model can enhance performance.</p>

    <!-- SECTION 3 -->
    <h2 id="1591384e-bcac-8098-9db5-f76c9ce00e7a" class="">Beyond majority: Best-of-N</h2>
    <p id="15b1384e-bcac-8019-9b5c-d11bae74628d" class="">Best-of-N is a simple, but effective extension to majority voting that uses a reward model to determine the most plausible answer. This method comes in two main variants:</p>

    <ul>
        <li><strong>Vanilla Best-of-N:</strong> Generate \(N\) independent responses and select the one with the <em>highest  RM reward</em> as the final answer. This ensures that the most confident individual response is chosen, but it doesn’t account for consistency across answers.</li>
        <li><strong>Weighted Best-of-N:</strong> Aggregate scores across all identical responses and select the answer with the <em>highest total reward</em>. This approach prioritises high-quality answers by boosting their scores through repeated occurrences. Mathematically, the weighting across answers \(a_i\) is performed as follows:

        $$ a_\mathrm{weighted} = \arg\max_{a} \sum_{i=1}^{N} \mathbb{I}(a_i = a) \cdot \mathrm{RM}(p, s_i) \,,$$

        where \(\mathrm{RM}(p, s_i)\) is the reward model score of the \(i\)-th solution \(s_i\) to problem \(p\). Here, a solution \(s_i\) refers to the concatenation of all steps from the model, while \(a_i\) refers to the final answer, typically extracted from <code>\boxed{answer}.</code></li>
    </ul>
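
    <p>Assuming each candidate solution has already been reduced to a single reward and a final answer (we discuss the reduction of step-level PRM scores next), both variants boil down to a few lines. This is an illustrative sketch rather than the exact code from our pipeline:</p>

    <d-code block language="python">
from collections import defaultdict

def best_of_n(answers: list[str], rewards: list[float]) -> str:
    # Vanilla Best-of-N: return the answer of the single highest-reward solution.
    return max(zip(answers, rewards), key=lambda pair: pair[1])[0]

def weighted_best_of_n(answers: list[str], rewards: list[float]) -> str:
    # Weighted Best-of-N: sum rewards over identical answers, then take the argmax.
    total_reward = defaultdict(float)
    for answer, reward in zip(answers, rewards):
        total_reward[answer] += reward
    return max(total_reward, key=total_reward.get)
    </d-code>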

    <p id="15d1384e-bcac-8012-8282-c0ed1215a611" class="">Typically, one usually uses an outcome reward model (ORM) to get a single, solution-level score. But to allow for fair comparison with the other search strategies discussed later, we will use the same PRM to score the solutions from Best-of-N. As illustrated below, PRMs produce a <em>cumulative</em> <em>sequence of step-level scores</em> per solution, so we need to perform a reduction over the steps to obtain a single solution-level score: </p><figure id="15d1384e-bcac-80d6-815f-c7d87fe313a6" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/prm-reductions.png"><img style="width:700px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/prm-reductions.png"/></a></figure><p id="15d1384e-bcac-80e7-8d1a-e0aab286f9f4" class="">In the literature, the most common reductions are the following:</p>

    <ul>
        <li><strong>Min: </strong>use the minimum score across all steps.</li>
        <li><strong>Prod: </strong>use the product of step-level scores.</li>
        <li><strong>Last: </strong>use the final score in the steps. This score contains the cumulative information from all prior steps, so it effectively treats the PRM as an ORM that is able to score partial solutions.</li>
    </ul>

    <p id="15b1384e-bcac-80ad-96d1-d313ae3e1954" class="">We experimented with each reduction and found—like DeepMind—that <em><strong>“last” performs best for our choice of task and PRM</strong></em>. We use this aggregation throughout all of our experiments and you can expand the detail below to see how we implemented it, along with the weighting procedure discussed above.</p>

    <p id="15d1384e-bcac-809a-8aa8-c52ca7301b52" class="">Here’s the results one gets from applying both variants of Best-of-N:</p><figure id="15b1384e-bcac-808d-857e-d492683a4a91" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-maj-bon.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-maj-bon.png"/></a></figure><p id="15b1384e-bcac-8001-9320-ff788bab0c52" class="">The results reveal a clear advantage: <strong>weighted Best-of-N</strong> consistently outperforms vanilla Best-of-N, especially with larger generation budgets. Its ability to aggregate scores across identical responses ensures that even less frequent but higher-quality answers are effectively prioritized.</p><p id="15b1384e-bcac-808a-b3ff-ee08c05a20af" class="">However, despite these improvements, we’re still falling short of the performance achieved by the Llama 8B model and the Best-of-N approach is starting to plateau at \(N=256\) generations. Can we push the boundaries further by supervising the search process step-by-step? Let’s find out 🚀!</p>

    <!-- SECTION 4 -->
    <h2 id="1591384e-bcac-8065-a02c-cd760ebd6cd1" class="">Beam search with process reward models</h2>

    <p id="15a1384e-bcac-80e1-9e0e-c01f5f373805" class="">Beam search is a structured search method that systematically explores the solution space, making it a powerful tool for improving model outputs at test-time. When combined with a PRM, beam search can optimize both the generation and evaluation of intermediate steps in problem-solving. The way it works is as follows:</p>

    <ol>
        <li>Generate multiple candidate solutions <em>iteratively</em> by maintaining a fixed number of &quot;beams&quot; or active paths \(N\).</li>
        <li>In the first iteration, sample \(N\) independent steps from the LLM with temperature \(T\) to introduce diversity in the responses. These steps are usually defined by a stopping criterion like terminating on a new line <code>\n</code> or double new line <code>\n\n</code>.</li>
        <li>Score each step with the PRM and select the top \(N/M\) steps as candidates for the next round of generation. Here \(M\) denotes the “beam width” of a given active path. As in Best-of-N, we used the “last” reduction to score the partial solutions at each iteration.</li>
        <li>Expand the steps selected in step (3) by sampling \(M\) next steps in the solution.</li>
        <li>Repeat steps (3) and (4) until the EOS token is reached or the maximum search depth is exceeded.</li>
    </ol>

    <p id="15a1384e-bcac-8003-a9d9-da7f3a4dc321" class="">By allowing the PRM to evaluate the correctness of intermediate steps, beam search can identify and prioritize promising paths early in the process. This step-by-step evaluation is particularly beneficial for complex reasoning tasks like mathematics, where verifying partial solutions can significantly improve final outcomes.</p>

    <details><summary style="font-weight:600;font-size:1.25em;line-height:1.3;margin:0">Implementation detail</summary><div class="indented">
        <p id="15b1384e-bcac-8065-a739-d24b699106be" class="">When we implemented beam search with process supervision, we encountered two major footguns with the Llama 3 chat template that are worth mentioning:<d-footnote>These footguns cost us a few days of our lives, so we’re sharing them for the benefit of humanity.</d-footnote></p>

        <ul>
            <li>By default, the chat template trims trailing new lines from every assistant turn. As a result, if one uses <code>\n</code> or <code>\n\n</code> to terminate a step, these tokens are lost on subsequent steps and force the model to produce peculiar outputs.</li>
            <li>The chat template is prefixed with Llama’s BOS token. When the formatted string is fed to vLLM a <em>second</em> BOS token is added which completely ruins performance, even though the generations look mostly coherent 🤯. See this <a href="https://github.com/vllm-project/vllm/issues/9519">vLLM issue</a> for more details.</li>
        </ul>

        <p>The solution is to <a href="https://github.com/huggingface/search-and-learn/blob/27f273f7db648d6d3739f0a65a0f7ab1ce45888f/src/sal/config.py#L50">overwrite the Llama 3 chat template</a> to prevent trimming and exclude the BOS token prefix.</p>
    </div>
    </details>
    <br><br>

    <p id="15d1384e-bcac-80e9-8e65-e1b58080b94c" class="">In our experiments, we followed DeepMind’s hyperparameter choices<d-cite key="gdm"></d-cite> and ran beam search with the following:</p>

    <ul>
        <li>\(N\) beams in compute scalings of 4, 16, 64, 256</li>
        <li>Fixed beam width \(M=4\)</li>
        <li>Sampling with temperature \(T=0.8\)</li>
        <li style="list-style-type:disc">Up to 40 iterations, i.e. a tree of maximum depth with 40 steps.</li>
    </ul>

    <p id="15d1384e-bcac-8051-abe5-dc84c42a1b5f" class="">As shown below, the results are striking: with a test-time budget of \(N=4\), beam search achieves the same accuracy as Best-of-N for \(N=16\), i.e. it is 4x more compute efficient! Similarly, with \(N=16\), beam search achieves the same accuracy as Best-of-N for \(N=256\), making it 16x more compute efficient at larger \(N\).  Moreover, beam search matches the performance of Llama 3.1 8B with just \(N=32\) solutions per problem. The average performance on MATH by computer science PhD students is around 40%,  so reaching nearly 55% isn’t too bad for a 1B model 💪!</p><figure id="15b1384e-bcac-80e9-97fa-fe50d1811f5b" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-maj-bon-beam.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-maj-bon-beam.png"/></a></figure>

    <h3 id="15a1384e-bcac-800c-baee-fb99b242ef87" class="">Which problems does beam search solve best?</h3>

    <p id="15d1384e-bcac-80e3-938a-c3f09db2e9ff" class="">Although in aggregate it is clear that beam search is a better search strategy than Best-of-N or majority voting, the DeepMind paper showed that <em><strong>each strategy has tradeoffs that depend on the problem difficulty</strong></em> and test-time compute budget. </p><p id="15d1384e-bcac-8015-a8f0-c2323b9e535f" class="">To see which problems are best suited for which strategy, DeepMind computed a distribution over estimated problem difficulty, and then binned the results into quintiles. In other words, each problem is assigned one of 5 levels, where level 1 indicates easier problems and level 5 indicates the hardest ones. To estimate problem difficulty, DeepMind generated 2048 candidate solutions with standard sampling per problem and then proposed the following heuristics:</p>

    <ul>
        <li><strong>Oracle: </strong>use the ground truth labels to estimate the pass@1 score per problem. Bin the distribution of pass@1 scores to determine the quintiles.</li>
        <li><strong>Model: </strong>use the distribution of average PRM scores per problem to determine the quintiles. The intuition here is that harder problems will have lower scores.</li>
    </ul>

    <details><summary style="font-weight:600;font-size:1.25em;line-height:1.3;margin:0">Implementation detail</summary><div class="indented">
        <p>The pass@k metric measures the probability, computed over a set of problems, that at least one of the top \(k\) generated outputs for each problem contains the correct solution. In practice, computing pass@k naively leads to high variance; for example, if we compute pass@1 from a single completion per problem, we can get significantly different values from repeated evaluations due to sampling. To combat this, OpenAI's Codex paper<d-cite key="codex"></d-cite> introduced an unbiased estimator that accounts for the total number of generated samples \(n\), the number of correct samples \(c\), and the desired \(k\) value. The estimator is formulated as:

            $$\text{pass@k} = \mathbb{E}_{\text{problems}} \left[ 1 - \frac{\binom{n - c}{k}}{\binom{n}{k}} \right]$$

        This formula calculates the expected value over all problems and determines the likelihood that at least one of the top \(k\) samples is correct. The term \(\binom{n - c}{k}/\binom{n}{k}\) represents the probability of selecting \(k\) incorrect samples from the total, and subtracting from 1 gives the probability of having at least one correct sample among the top \(k\).<d-footnote>See the <a href="https://samuelalbanie.com/files/digest-slides/2022-07-codex.pdf?utm_source=chatgpt.com">wonderful notes</a> from Samuel Albanie for many more details on pass@k.</d-footnote></p>

        <p>However, computing the estimator directly suffers from numerical instabilities, so in practice one uses the following <a href="https://github.com/huggingface/search-and-learn/blob/27f273f7db648d6d3739f0a65a0f7ab1ce45888f/src/sal/utils/math.py#L230">simplified form:</a>
            <d-code block language="python">
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """A numerically stable method for calculating an unbiased estimate of pass@k.

    Taken from OpenAI's Codex paper: https://arxiv.org/abs/2107.03374

    Args:
        n (`int`): total number of samples
        c (`int`): number of correct samples
        k (`int`): k in pass@$k$

    Returns:
        `float`: an unbiased estimate of pass@k
    """
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
            </d-code>
        </p>


    </div>
    </details>
    <br><br>
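
    <p>With pass@1 estimates in hand, the oracle difficulty bins can be computed with a few lines of numpy. This is a minimal sketch; the exact binning procedure in the DeepMind paper may differ:</p>

    <d-code block language="python">
import numpy as np

def assign_difficulty_levels(num_correct: list[int], n: int = 2048) -> np.ndarray:
    # Oracle difficulty: for k=1 the unbiased estimator reduces to c / n.
    pass_at_1 = np.array(num_correct) / n
    # Bin the pass@1 distribution into quintiles: level 1 = easiest, 5 = hardest.
    quintile_edges = np.quantile(pass_at_1, [0.2, 0.4, 0.6, 0.8])
    bins = np.digitize(pass_at_1, quintile_edges)  # 0 = lowest pass@1 ... 4 = highest
    return 5 - bins  # low pass@1 means a hard problem, so invert the index
    </d-code>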

    <p id="15d1384e-bcac-80a3-af7c-f3497126ab1e" class="">Here’s the breakdown of the various methods according to the pass@1 scores and across four test-time compute budgets of \(N = [4,16,64, 256]\):</p><figure id="15b1384e-bcac-80ad-9cf3-cf5bcbd3f53b" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/levels-maj-bon-beam.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/levels-maj-bon-beam.png"/></a></figure><p id="15d1384e-bcac-80c3-93b3-fa4c071ac807" class="">In this plot, each bar denotes a test-time compute budget, and within each bar we show the relative accuracy of each method. For example, in the group of four bars on difficulty level 2 we see that:</p>

    <ul>
        <li>Majority voting is the worst performer for all compute budgets, except for \(N=256\), where beam search is worst.</li>
        <li>Beam search is best for \(N=[4,16,64]\), but Best-of-N is best for \(N=256\).</li>
    </ul>

    <p id="15a1384e-bcac-80d4-af98-eaebf5fcf84e" class="">Although we see that beam search gives consistent gains in the medium and hard problems (levels 3-5), it tends to do worse than Best-of-N (and even majority voting!) on the simpler problems and especially at large compute budgets. </p><p id="15a1384e-bcac-805b-9949-f0cdc44c9e3c" class="">We realized from looking at the resulting trees produced by beam search, that if a single step is assigned high reward, then the whole tree collapses to that trace and thus diversity is impacted. This prompted us to explore an extension to beam search that maximises diversity - let’s take a look!</p>

    <!-- SECTION 5 -->
    <h2 id="1591384e-bcac-80d2-8234-fe0e9a4df59d" class="">DVTS: boosting performance with diversity</h2><p id="1591384e-bcac-8044-b7c5-cf39e4aed683" class="">As we saw above beam search gives strong performance over Best-of-N, but tends to underperform on simpler problems and at large test-time compute budgets. To address this, we developed an extension we call Diverse Verifier Tree Search (DVTS) that is designed to maximise diversity at large \(N\).</p><p id="15a1384e-bcac-80ff-a97b-c7ccd88958e4" class="">DVTS works in a similar fashion as beam search, with the following modifications:</p>

    <ol>
        <li>For a given \(N\) and \(M\), expand the initial set of beams into \(N/M\) <em>independent</em> subtrees.</li>
        <li>For each subtree, select the step with the highest PRM score.</li>
        <li>Generate \(M\) new steps from the nodes selected in step (2) and select the step with the highest PRM score.</li>
        <li>Repeat step (3) until the EOS token or maximum tree depth is reached.</li>
    </ol>
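
    <p>Reusing the hypothetical <code>llm</code> and <code>prm</code> helpers from the beam search sketch above, DVTS can be sketched as follows, with the subtree solutions aggregated at the end (e.g. with weighted Best-of-N):</p>

    <d-code block language="python">
def dvts(problem, llm, prm, n=64, beam_width=4, max_depth=40):
    """Hypothetical sketch of DVTS; not the exact code we ran."""
    solutions = []
    # Step 1: split the budget of N beams into N/M independent subtrees.
    for _ in range(n // beam_width):
        # Step 2: sample M first steps and keep the one with the highest PRM score.
        candidates = [llm.sample_step(problem, prefix="") for _ in range(beam_width)]
        best = max(candidates, key=lambda c: prm.score(problem, c))
        # Steps 3-4: greedily expand the best node until EOS or the maximum depth.
        for _ in range(max_depth):
            if llm.is_complete(best):
                break
            expansions = [best + llm.sample_step(problem, prefix=best) for _ in range(beam_width)]
            best = max(expansions, key=lambda e: prm.score(problem, e))
        solutions.append(best)
    return solutions  # aggregated downstream, e.g. with weighted Best-of-N
    </d-code>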

    <p id="15d1384e-bcac-8087-b916-d9603de035dd" class="">Here’s the results from applying DVTS to Llama 1B:</p><figure id="15b1384e-bcac-801c-a1e7-d4e544826da3" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-all.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-all.png"/></a></figure><p id="15b1384e-bcac-80e1-bc9b-dbdb5738b9f1" class="">As we can see, DVTS provides a complementary strategy to beam search: at small \(N\) beam search is more effective at finding correct solutions, but at large \(N\) the diversity of DVTS candidates kicks in and we get better performance. </p><p id="15d1384e-bcac-80a7-8379-dca3c329c433" class="">We can also see this manifested in the problem difficulty breakdown, where DVTS enhances performance on the easy / medium problems at large \(N\), while beam search is best at small \(N\) across model problem difficulties:</p><figure id="15b1384e-bcac-807a-8dca-f322077cc616" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/levels-all.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/levels-all.png"/></a></figure>

    <!-- SECTION 6 -->
    <h2 id="1591384e-bcac-806b-9dd0-c80a250c7754" class="">The best of all worlds: compute-optimal scaling</h2>

    <p>Armed with various search strategies, a natural question is which one is best? In the DeepMind paper, they proposed a <em><strong>compute-optimal</strong></em> <em><strong>scaling strategy</strong></em> where one selects the search method and hyperparameters \(\theta\) that achieves the <em><strong>best performance for a given compute budget </strong></em><em>\(N\)</em><em><strong>:</strong></em>

    $$\theta_{q,a^*(q)}^*(N) = \underset{\theta}{\arg\max} \left( \mathbb{E}_{y \sim \text{Target}(\theta, N, q)} \left[ \mathbb{1}_{y = y^*(q)} \right] \right),$$

    where \(y^*(q)\) is the ground-truth for question \(q\) and \(\theta_{q,a^*(q)}^*(N)\) denotes the compute-optimal scaling strategy. Since computing \(\theta_{q,a^*(q)}^*(N)\) directly is somewhat tricky, DeepMind proposed an approximation based on the <em><strong>problem difficulty</strong></em>, i.e. allocate test-time compute according to which search strategy achieves best performance for a given difficulty level.</p>

    <p id="15a1384e-bcac-80c9-a276-d5ea8974c543" class="">For example, on simpler problems and lower compute budgets, it is better to use strategies like Best-of-N, while on harder problems, beam search is the better choice. To implement this, for each method we compute the accuracy for a given difficulty level and test-time compute budget. And voila, we now have our compute-optimal curve!</p>

    <figure id="15b1384e-bcac-80b3-bc58-d20ba41d3950" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-opt.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-opt.png"/></a></figure>

    <!-- SECTION 7 -->
    <h2 id="1591384e-bcac-809a-96d2-e928398d159a" class="">Scaling up to larger models</h2>

    <p id="15a1384e-bcac-8078-86d7-f48c2146444e" class="">We also explored scaling up the compute-optimal recipe to Llama 3.2 3B Instruct to see at what point the benefits of the PRM fade in comparison to the policy’s own capacity. To our surprise, compute-optimal scaling works remarkably well, with the 3B model surpassing the performance of Llama 3.1 70B Instruct (22x it's size!):</p><figure id="15b1384e-bcac-80b3-bc58-d20ba41d3950" class="image"><a href="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-opt-3b.png"><img style="width:707.9891357421875px" src="https://huggingface.co/datasets/HuggingFaceH4/blogpost-images/resolve/main/methods-opt-3b.png"/></a></figure>

    <h2 id="15a1384e-bcac-809c-b5e7-eb92dadaebb4" class="">Where to go from here?</h2><p id="15b1384e-bcac-8052-91d7-d6e1f6f66e09" class="">This exploration of test-time compute scaling has revealed both the potential and the challenges of leveraging search-based methods. As we look ahead, several exciting directions emerge:</p>

    <ol>
        <li><strong>The Power of Strong Verifiers:</strong> Strong verifiers play a critical role in enhancing performance. However, their current limitations are apparent, as highlighted in benchmarks like ProcessBench.<d-cite key="processbench"></d-cite> Improving the robustness and generalization of verifiers will be crucial for advancing these methods.</li>
        <li><strong>The Challenge of Self-Verification:</strong> The ultimate goal—or &quot;holy grail&quot;—is achieving self-verification, where models can validate their own outputs autonomously. This approach appears to be what models like o1 are doing, but remains difficult to implement in practice. Unlike standard supervised fine-tuning (SFT), self-verification demands more nuanced strategies. The recent DeepMind paper on self-verification and <em>SCoRe</em> sheds light on this challenge and offers a pathway for future research.<d-cite key="score"></d-cite></li>
        <li><strong>Integrating “Thoughts” into the Process:</strong> Incorporating explicit intermediate steps or “thoughts” during generation could further enhance reasoning and decision-making. By integrating structured reasoning into the search process, we may unlock better performance on complex tasks.</li>
        <li><strong>Search as a Data Generation Tool:</strong> This method can also serve as a powerful data generation process, creating high-quality training datasets. For example, fine-tuning models like Llama 1B on correct traces produced by search could yield significant gains. This on-policy approach resembles techniques like ReST<d-cite key="rest"></d-cite> or V-STaR<d-cite key="vstar"></d-cite> but with the added benefits of search, offering a promising direction for iterative improvement.</li>
        <li><strong>A Call for More PRMs:</strong> Open process reward models (PRMs) are relatively rare, limiting their broader application. Developing and sharing more PRMs for different domains is a critical area where the community can contribute significantly.</li>
        <li><strong>Expanding Beyond Verifiable Domains:</strong> While current methods excel in domains like math and code, where solutions are inherently verifiable, extending these techniques to other areas remains a major challenge. How can we adapt these strategies for less structured or subjective tasks? This is a vital question for future exploration.</li>
    </ol>

    <p>We'd love to hear from you on your ideas or feedback in the <a href="https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute/discussions">discussions tab</a>!</p>

    <h2 id="15b1384e-bcac-8093-82c6-d9c951dc0bab" class="">Acknowledgements</h2><p id="15c1384e-bcac-80d5-8ad4-d7f6874404c5" class="">We are grateful to Charlie Snell and Aviral Kumar for many discussions about test-time compute scaling and for sharing implementation details from their work. We thank Chun Te Lee for designing the lovely banner and Thomas Wolf, Leandro von Werra, Colin Raffel, and Quentin Gallouédec for many helpful suggestions to improve the blog post. We also thank Hugo Larcher and Mathieu Morlon for continually optimising the Hugging Face Science Cluster to make the GPUs go brrr 🔥!</p>

</d-article>

<d-appendix>
    <d-bibliography src="bibliography.bib"></d-bibliography>
    <style>
      d-appendix .citation {
        font-size: 11px;
        line-height: 15px;
        border-left: 1px solid rgba(0, 0, 0, 0.1);
        padding-left: 18px;
        border: 1px solid rgba(0,0,0,0.1);
        background: rgba(0, 0, 0, 0.02);
        padding: 10px 18px;
        border-radius: 3px;
        color: rgba(150, 150, 150, 1);
        overflow: hidden;
        margin-top: -12px;
        white-space: pre-wrap;
        word-wrap: break-word;
      }
    </style>

    <h3 id="citation">Citation</h3>
    <p>For attribution in academic contexts, please cite this work as</p>
    <pre class="citation short">Beeching, Edward, and Tunstall, Lewis, and Rush, Sasha, "Scaling test-time compute with open models.", 2024.</pre>
    <p>BibTeX citation</p>
    <pre class="citation long">@misc{beeching2024scalingtesttimecompute,
      title={Scaling test-time compute with open models},
      author={Edward Beeching and Lewis Tunstall and Sasha Rush},
      year={2024},
      url={https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute},
}</pre>
</d-appendix>

<script>
    const article = document.querySelector('d-article');
    const toc = document.querySelector('d-contents');
    if (toc) {
        const headings = article.querySelectorAll('h2, h3, h4');
        let ToC = `<nav role="navigation" class="l-text figcaption"><h3>Table of contents</h3>`;
        let prevLevel = 0;

        for (const el of headings) {
            // should element be included in TOC?
            const isInTitle = el.parentElement.tagName == 'D-TITLE';
            const isException = el.getAttribute('no-toc');
            if (isInTitle || isException) continue;
            el.setAttribute('id', el.textContent.toLowerCase().replaceAll(" ", "_"))
            const link = '<a target="_self" href="' + '#' + el.getAttribute('id') + '">' + el.textContent + '</a>';

            const level = el.tagName === 'H2' ? 0 : (el.tagName === 'H3' ? 1 : 2);
            while (prevLevel < level) {
                ToC += '<ul>'
                prevLevel++;
            }
            while (prevLevel > level) {
                ToC += '</ul>'
                prevLevel--;
            }
            if (level === 0)
                ToC += '<div>' + link + '</div>';
            else
                ToC += '<li>' + link + '</li>';
        }

        while (prevLevel > 0) {
            ToC += '</ul>'
            prevLevel--;
        }
        ToC += '</nav>';
        toc.innerHTML = ToC;
        toc.setAttribute('prerendered', 'true');
        const toc_links = document.querySelectorAll('d-contents > nav a');

        window.addEventListener('scroll', (_event) => {
            if (typeof (headings) != 'undefined' && headings != null && typeof (toc_links) != 'undefined' && toc_links != null) {
                // Then iterate forwards, on the first match highlight it and break
                find_active: {
                    for (let i = headings.length - 1; i >= 0; i--) {
                        if (headings[i].getBoundingClientRect().top - 50 <= 0) {
                            if (!toc_links[i].classList.contains("active")) {
                                toc_links.forEach((link, _index) => {
                                    link.classList.remove("active");
                                });
                                toc_links[i].classList.add('active');
                            }
                            break find_active;
                        }
                    }
                    toc_links.forEach((link, _index) => {
                        link.classList.remove("active");
                    });
                }
            }
        });
    }
</script>
</body>
</html>