Spaces:
Sleeping
Sleeping
BeveledCube
committed on
Commit
•
2d6682d
1
Parent(s):
5525b46
erm
Browse files
- main.py +1 -1
- models/gpt2.py +1 -2
main.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
from flask import Flask, request, render_template, jsonify
|
2 |
-
from models import
|
3 |
|
4 |
app = Flask("AI API")
|
5 |
|
|
|
1 |
from flask import Flask, request, render_template, jsonify
|
2 |
+
from models import gpt2 as chatbot
|
3 |
|
4 |
app = Flask("AI API")
|
5 |
|
models/gpt2.py
CHANGED
@@ -13,9 +13,8 @@ def load():
|
|
13 |
def generate(input_text):
|
14 |
# Tokenize the input text
|
15 |
input_ids = tokenizer.encode(input_text, return_tensors="pt", truncation=True)
|
16 |
-
attention_mask = tf.ones_like(input_ids)
|
17 |
|
18 |
# Generate output using the model
|
19 |
-
output_ids = model.generate(input_ids, num_beams=3, no_repeat_ngram_size=2, max_new_tokens=
|
20 |
|
21 |
return tokenizer.decode(output_ids[0], skip_special_tokens=True)
|
|
|
13 |
def generate(input_text):
|
14 |
# Tokenize the input text
|
15 |
input_ids = tokenizer.encode(input_text, return_tensors="pt", truncation=True)
|
|
|
16 |
|
17 |
# Generate output using the model
|
18 |
+
output_ids = model.generate(input_ids, num_beams=3, no_repeat_ngram_size=2, max_new_tokens=200, eos_token_id=tokenizer.eos_token_id)
|
19 |
|
20 |
return tokenizer.decode(output_ids[0], skip_special_tokens=True)
|