Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import asyncio
 
 description = """## Compare Creative Writing: Standard Sampler vs. Backtrack Sampler with Creative Writing Strategy
 This is a demo of the [Backtrack Sampler](https://github.com/Mihaiii/backtrack_sampler) framework using "Creative Writing Strategy".
-<br />On the left
+<br />On the left is the output of the standard sampler and on the right the output provided by Backtrack Sampler.
 """
 
 model_name = "unsloth/Llama-3.2-1B-Instruct"
@@ -18,10 +18,9 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model1 = AutoModelForCausalLM.from_pretrained(model_name).to("cuda")
 
 model2 = AutoModelForCausalLM.from_pretrained(model_name)
-device = torch.device('cuda')
 
-strategy = CreativeWritingStrategy()
 provider = TransformersProvider(model2, tokenizer, device)
+strategy = CreativeWritingStrategy(provider)
 creative_sampler = BacktrackSampler(strategy, provider)
 
 def create_chat_template_messages(history, prompt):
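The functional change in the second hunk is that CreativeWritingStrategy now takes the provider as a constructor argument, so the provider has to be built before the strategy. A minimal sketch of the resulting setup is below; the backtrack_sampler import paths and the `device` definition are assumptions, since the diff does not show them.

```python
# Minimal sketch of the setup after this commit. The backtrack_sampler
# import paths below are assumptions (not shown in the diff), based on
# the repository linked in the demo description.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from backtrack_sampler import BacktrackSampler, CreativeWritingStrategy
from backtrack_sampler.provider.transformers_provider import TransformersProvider

model_name = "unsloth/Llama-3.2-1B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)

model1 = AutoModelForCausalLM.from_pretrained(model_name).to("cuda")  # standard sampler
model2 = AutoModelForCausalLM.from_pretrained(model_name)             # used by the backtrack sampler

# Assumption: `device` must still be defined somewhere in app.py, since the
# commit removes this assignment but keeps passing `device` to TransformersProvider.
device = torch.device("cuda")

# Order matters after this commit: the provider is created first because
# CreativeWritingStrategy now receives it as a constructor argument.
provider = TransformersProvider(model2, tokenizer, device)
strategy = CreativeWritingStrategy(provider)
creative_sampler = BacktrackSampler(strategy, provider)
```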