import json

from transformers import pipeline
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# One-time NLTK resource setup: tokenizer models, lemmatizer dictionary,
# and the English stop-word list.
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')

# Course catalogue. Expected shape (from how it is read below):
# {"courses": {<field name>: [<course>, ...]}} — TODO confirm against the file.
with open('uts_courses.json') as f:
    data = json.load(f)

# Generic extractive question-answering model for free-form questions.
qa_pipeline = pipeline("question-answering")

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()


def preprocess_input(user_input):
    """Normalize *user_input*: lowercase, tokenize, keep only alphanumeric
    non-stop-word tokens, lemmatize each, and rejoin with single spaces.

    Returns the normalized text as a single string.
    """
    tokens = word_tokenize(user_input.lower())
    filtered_tokens = [
        lemmatizer.lemmatize(word)
        for word in tokens
        if word.isalnum() and word not in stop_words
    ]
    return " ".join(filtered_tokens)


def find_courses_by_field(field):
    """Return the list of courses for *field*, or [] when the field is
    not present in the catalogue."""
    # .get avoids the membership-test-then-index double lookup.
    return data['courses'].get(field, [])


def generate_response(user_input):
    """Produce the bot's reply for one line of user input.

    Three behaviours:
      * normalized input equal to 'exit'  -> exit sentinel string;
      * a "courses ... available" question -> catalogue lookup by field;
      * anything else -> extractive QA over the serialized catalogue.
    """
    processed = preprocess_input(user_input)
    if processed == 'exit':
        return "Exiting the program."

    if "courses" in processed and "available" in processed:
        # BUG FIX: "in" is an English stop word, so it never survives
        # preprocessing — splitting the processed text on "in " raised
        # IndexError for virtually every such question. Extract the field
        # from the raw (lowercased) input instead, and guard against
        # questions that omit "in <field>" entirely.
        parts = user_input.lower().split("in ", 1)
        if len(parts) < 2 or not parts[1].strip():
            return ("Please specify a field, e.g. "
                    "'What courses are available in engineering?'")
        field = parts[1].strip().rstrip("?.!")
        courses = find_courses_by_field(field)
        if courses:
            return f"Courses in {field}: {', '.join(courses)}"
        return f"No courses found in {field}."

    # BUG FIX: the question-answering pipeline requires a *string* context;
    # passing the raw dict raised at runtime. Serialize the catalogue first.
    answer = qa_pipeline(question=user_input, context=json.dumps(data))
    return answer['answer']


def main():
    """Interactive REPL: read user lines, print bot replies, stop on the
    exit sentinel returned by generate_response."""
    print("Welcome! I'm the UTS Course Chatbot. How can I assist you today?")
    print("You can ask questions about UTS courses or type 'exit' to end the conversation.")
    while True:
        user_input = input("You: ")
        response = generate_response(user_input)
        print("Bot:", response)
        if response == "Exiting the program.":
            break


if __name__ == "__main__":
    main()