Update app.py
app.py CHANGED
@@ -104,20 +104,20 @@ def process():
 
     # Generate text
     with torch.no_grad():
-
-
+        outputs = model.generate(input_ids)
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     #To Here
 
-        outputs = model.generate(input_ids,
-                                 min_length = 20,
-                                 max_new_tokens = 600,
-                                 length_penalty = 1.0, # Set to values < 1.0 in order to encourage the model to generate shorter answers.
-                                 num_beams = 10,
-                                 no_repeat_ngram_size = 3,
-                                 temperature = 0,
-                                 top_k = 150, # default 50
-                                 top_p = 0.92,
-                                 repetition_penalty = 2.1)
-        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        #outputs = model.generate(input_ids,
+        #min_length = 20,
+        #max_new_tokens = 600,
+        #length_penalty = 1.0, # Set to values < 1.0 in order to encourage the model to generate shorter answers.
+        #num_beams = 10,
+        #no_repeat_ngram_size = 3,
+        #temperature = 0,
+        #top_k = 150, # default 50
+        #top_p = 0.92,
+        #repetition_penalty = 2.1)
+        #generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
         st.write(GoogleTranslator(source='en', target='hy').translate(generated_text))
 process()
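
For context, this commit falls back to model.generate's defaults (greedy decoding) and keeps the tuned call around as comments. Below is a minimal, self-contained sketch of the same step; the checkpoint name and prompt are placeholders, not taken from this Space. One caveat: in Hugging Face transformers, temperature, top_k, and top_p only take effect when do_sample=True (and temperature must be > 0), while num_beams > 1 without sampling selects plain beam search, so re-enabling the commented-out call exactly as written would not behave the way those values suggest.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the Space's actual model/tokenizer setup is not
# shown in this diff.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Example prompt", return_tensors="pt").input_ids

with torch.no_grad():
    # New behavior in this commit: default (greedy) decoding.
    outputs = model.generate(input_ids)

    # To restore the tuned call, sampling must be switched on explicitly,
    # and temperature has to be positive:
    # outputs = model.generate(input_ids,
    #                          do_sample=True,        # required for temperature/top_k/top_p
    #                          min_length=20,
    #                          max_new_tokens=600,
    #                          length_penalty=1.0,    # only used by beam search
    #                          num_beams=10,
    #                          no_repeat_ngram_size=3,
    #                          temperature=0.7,       # 0 is invalid when sampling
    #                          top_k=150,
    #                          top_p=0.92,
    #                          repetition_penalty=2.1)

generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)

The unchanged st.write line then passes the English output through deep_translator's GoogleTranslator(source='en', target='hy'), i.e. translates it to Armenian before display.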