Update app.py
app.py CHANGED
@@ -42,13 +42,21 @@ def generate(prompt):
     response = generated_text.strip()
     yield response
 
-with gr.Blocks() as demo:
+with gr.Blocks(css="#prompt_textbox textarea {color: blue}") as demo:
     gr.Markdown("""
     # GPT2 Amharic
-    This is a demo for a smaller version of the gpt2 decoder transformer model pretrained for 1.5 days on `290 million` tokens of **Amharic** text. The context size of
+    This is a demo for a smaller version of the gpt2 decoder transformer model pretrained for 1.5 days on `290 million` tokens of **Amharic** text. The context size of [gpt2-small-amharic](https://huggingface.co/rasyosef/gpt2-small-amharic-128-v3) is 128 tokens. This is a base model and hasn't undergone any supervised finetuning yet.
+
+    Please **enter a prompt** and click the **Generate** button to generate completions for the prompt.
+    #### Text generation parameters:
+    - `temperature` : **0.8**
+    - `do_sample` : **True**
+    - `top_k` : **8**
+    - `top_p` : **0.8**
+    - `repetition_penalty` : **1.25**
     """)
 
-    prompt = gr.Textbox(label="Prompt", placeholder="Enter prompt here", lines=4, interactive=True)
+    prompt = gr.Textbox(label="Prompt", placeholder="Enter prompt here", lines=4, interactive=True, elem_id="prompt_textbox")
     with gr.Row():
         with gr.Column():
             gen = gr.Button("Generate")
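The two edits in this hunk work as a pair: the new `elem_id="prompt_textbox"` on the prompt textbox becomes the HTML id that the `#prompt_textbox textarea` selector in the `css` argument of `gr.Blocks` targets, so only the prompt's text is colored blue. A minimal, self-contained sketch of that pattern (not the Space's full `app.py`; the extra `Output` textbox is added here only for contrast):

```python
# Minimal sketch of the css/elem_id pairing used in this diff.
import gradio as gr

# "#prompt_textbox" matches the component with elem_id="prompt_textbox";
# "textarea" narrows the rule to its inner text area.
custom_css = "#prompt_textbox textarea {color: blue}"

with gr.Blocks(css=custom_css) as demo:
    prompt = gr.Textbox(
        label="Prompt",
        placeholder="Enter prompt here",
        lines=4,
        interactive=True,
        elem_id="prompt_textbox",  # exposed as the HTML id the CSS selector uses
    )
    # A component without that elem_id keeps the default text color.
    output = gr.Textbox(label="Output", lines=4)

if __name__ == "__main__":
    demo.launch()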
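```

The added Markdown only documents the sampling settings; the body of `generate` that consumes them is outside this hunk. A hedged sketch of how those exact values would typically be passed to the linked gpt2-small-amharic checkpoint with `transformers` (the model-loading code and `max_new_tokens` value are assumptions, not shown in the diff):

```python
# Hedged sketch: one plausible generate() body matching the parameters listed
# in the demo's Markdown. Not taken from the Space's source.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "rasyosef/gpt2-small-amharic-128-v3"  # linked in the demo text
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

def generate(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        max_new_tokens=64,       # assumed; not stated in the diff
        do_sample=True,          # values below match the demo's Markdown
        temperature=0.8,
        top_k=8,
        top_p=0.8,
        repetition_penalty=1.25,
    )
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    response = generated_text.strip()
    yield response
```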