curelycue committed 971836c (1 parent: 197a245)
Added repetition penalty

Files changed:
- app.py (+16 -7)
- requirements.txt (+1 -0)
app.py
CHANGED
@@ -8,30 +8,39 @@ model_name = "waterdrops0/mistral-nouns400"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
 
-def generate_text(prompt, max_length=50, temperature=0.7):
+def generate_text(prompt, max_length=50, temperature=0.7, repetition_penalty=1.2):
+    # Encode the input prompt
     inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
+
+    # Generate output based on the prompt with repetition penalty
     outputs = model.generate(
         inputs,
-        max_length=max_length,
+        max_length=max_length + inputs.shape[1],  # Ensuring generated text extends beyond the input prompt
         temperature=temperature,
+        repetition_penalty=repetition_penalty,  # Add repetition penalty
         do_sample=True,
         top_p=0.95,
         top_k=60
     )
-    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return text
 
-#
+    # Decode the generated tokens, skipping the input tokens
+    generated_tokens = outputs[0, inputs.shape[1]:]  # Only get the new tokens
+    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
+
+    return generated_text
+
+# Update the Gradio interface to include repetition penalty slider
 iface = gr.Interface(
     fn=generate_text,
     inputs=[
         gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
         gr.Slider(10, 200, step=10, value=50, label="Max Length"),
-        gr.Slider(0.1, 1.0, step=0.1, value=0.7, label="Temperature")
+        gr.Slider(0.1, 1.0, step=0.1, value=0.7, label="Temperature"),
+        gr.Slider(1.0, 2.0, step=0.1, value=1.2, label="Repetition Penalty")  # Add a slider for repetition penalty
     ],
     outputs=gr.Textbox(label="Generated Text"),
     title="Mistral 7B Nouns Model",
-    description="Generate text using the fine-tuned Mistral 7B model."
+    description="Generate text using the fine-tuned Mistral 7B model with repetition penalty."
 )
 
 if __name__ == "__main__":
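For context on what the new repetition_penalty argument does: when it is passed to model.generate, transformers applies a RepetitionPenaltyLogitsProcessor that scales down the logits of tokens already present in the sequence before sampling. A minimal sketch of that effect, with toy tensors that are illustrative only and not part of this Space:

import torch
from transformers import RepetitionPenaltyLogitsProcessor

# Toy example: vocabulary of 5 tokens; token 2 already appears in the sequence.
input_ids = torch.tensor([[2]])
scores = torch.tensor([[1.0, 0.5, 2.0, -1.0, 0.0]])

# With penalty=1.2, positive logits of already-seen tokens are divided by 1.2
# (negative ones are multiplied by 1.2), making repeats less likely to be sampled.
processor = RepetitionPenaltyLogitsProcessor(penalty=1.2)
penalized = processor(input_ids, scores)

print(penalized)  # token 2's logit drops from 2.0 to about 1.67; the rest are unchanged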
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
+huggingface_hub==0.22.2
 gradio
 transformers
 torch