from flask import Flask, render_template, request
from transformers import GPT2LMHeadModel, GPT2Tokenizer

app = Flask(__name__)

# Load the fine-tuned GPT-2 model and tokenizer from the local checkpoint directory.
model = GPT2LMHeadModel.from_pretrained("./fine_tuned_model")
tokenizer = GPT2Tokenizer.from_pretrained("./fine_tuned_model")

# GPT-2 has no dedicated padding token; reuse the end-of-text token so the
# tokenizer can pad inputs without raising an error.
tokenizer.pad_token = tokenizer.eos_token


def generate_feedback(user_input):
    # Tokenize the submitted answer, truncating long submissions to 512 tokens.
    inputs = tokenizer(user_input, return_tensors="pt", truncation=True, padding=True, max_length=512)
    # Generate up to 150 new tokens beyond the prompt; max_new_tokens (rather
    # than max_length) keeps long prompts from exhausting the generation budget,
    # and pad_token_id silences the "no pad token" warning.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=150,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Note: the decoded text contains the original answer followed by the
    # model's generated feedback, since generate() returns prompt + continuation.
    feedback = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return feedback


@app.route('/')
def index():
    return render_template('behavioral.html')
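

# The form handling below assumes a behavioral.html template (not shown here)
# containing a form that POSTs a field named "answer" to /submit_answer and
# that displays the "feedback" variable when one is passed in. Only the field
# name and the route are implied by this file; the rest of the markup is an
# assumption.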


@app.route('/submit_answer', methods=['POST'])
def submit_answer():
    # Read the candidate's answer from the submitted form.
    user_input = request.form['answer']
    # Run the fine-tuned model to produce feedback on the answer.
    feedback = generate_feedback(user_input)
    # Re-render the page with the generated feedback filled in.
    return render_template('behavioral.html', feedback=feedback)


if __name__ == '__main__':
    app.run(debug=True)
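

# --- Optional quick check (sketch) -------------------------------------------
# One lightweight way to exercise /submit_answer without a browser is Flask's
# built-in test client. This is only a sketch: it assumes this file is saved as
# app.py and that ./fine_tuned_model exists. Keep it in a separate script (for
# example smoke_test.py, a hypothetical name) so it does not run alongside the
# server above.
#
#     from app import app
#
#     with app.test_client() as client:
#         resp = client.post("/submit_answer", data={"answer": "I once led a project..."})
#         print(resp.status_code)
#         print(resp.get_data(as_text=True)[:300])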