File size: 1,697 Bytes
23a6dc1
af8384f
 
743067f
e6d1128
23a6dc1
 
6e93ad1
46bcc72
 
e6d1128
2e7fa77
 
 
af8384f
2e7fa77
 
 
 
 
 
 
 
 
 
6e93ad1
2e7fa77
 
6e93ad1
2e7fa77
 
 
 
23c5a04
23a6dc1
 
 
 
 
 
2e7fa77
64fa836
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import json

app = Flask(__name__)

# Load DialoGPT model and tokenizer.
# NOTE(review): this runs at import time and downloads the weights on the
# first run, so server startup blocks until the model is in the local cache.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Load courses data from JSON file.
# Assumes uts_courses.json has a top-level "courses" key mapping category
# names to lists of course names (that shape is what generate_response
# iterates over) — TODO confirm against the actual file.
with open("uts_courses.json", "r") as file:
    courses_data = json.load(file)

def generate_response(user_input):
    """Return a chatbot reply for *user_input*.

    Handles fixed commands ("help", "exit", "list courses") and
    course-category lookups against the module-level ``courses_data``;
    any other input falls through to the DialoGPT model.

    Parameters:
        user_input: raw text from the client; matching is case-insensitive.

    Returns:
        The reply string.
    """
    # Normalize once instead of calling .lower() in every branch.
    command = user_input.lower()
    if command == "help":
        return "I can help you with information about UTS courses. Feel free to ask!"
    elif command == "exit":
        return "Goodbye!"
    elif command == "list courses":
        course_list = "\n".join([f"{category}: {', '.join(courses)}" for category, courses in courses_data["courses"].items()])
        return f"Here are the available courses:\n{course_list}"
    elif command in courses_data["courses"]:
        # Bug fix: the membership test used the lowercased input but the
        # lookup used the raw input, so any non-lowercase spelling that
        # matched here raised KeyError. Index with the same normalized key.
        return f"The courses in {user_input} are: {', '.join(courses_data['courses'][command])}"
    else:
        # Fall back to DialoGPT: encode the prompt terminated with EOS,
        # generate up to 100 tokens (EOS used for padding to silence the
        # missing-pad-token warning), and decode without special tokens.
        input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
        response_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
        response = tokenizer.decode(response_ids[0], skip_special_tokens=True)
        return response

@app.route("/", methods=["POST"])
def chat():
    """POST / — expects a JSON body ``{"user_input": "..."}``.

    Returns:
        JSON ``{"response": "..."}`` on success, or
        JSON ``{"error": "..."}`` with HTTP 400 on a malformed request.
    """
    # Robustness: request.json raises on a non-JSON body and a bare
    # ["user_input"] raises KeyError — both surfaced as opaque 500s.
    # Validate explicitly and answer with a 400 instead.
    payload = request.get_json(silent=True)
    if not payload or "user_input" not in payload:
        return jsonify({"error": "Missing 'user_input' in JSON body"}), 400
    response = generate_response(payload["user_input"])
    return jsonify({"response": response})

# Run the Flask development server when executed as a script.
# NOTE(review): debug=True enables the interactive Werkzeug debugger
# (arbitrary code execution) and auto-reload — never use in production.
if __name__ == "__main__":
    app.run(debug=True)