from transformers import AutoTokenizer, AutoModelForCausalLM
from dotenv import load_dotenv
import streamlit as st
import os
import torch

load_dotenv()

# Load the pre-trained model and tokenizer
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Function to get responses from the model
def get_model_response(question):
    # DialoGPT expects each user turn to end with the EOS token
    input_ids = tokenizer.encode(question + tokenizer.eos_token, return_tensors='pt')
    output = model.generate(
        input_ids,
        max_length=200,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response

# Initialize the Streamlit application
st.set_page_config(page_title="QuerySage")
st.header("QuerySage")

input_text = st.text_input("Input: ", key="input")
submit = st.button("Inquire")

# When the Inquire button is clicked, generate and display a response
if submit:
    response = get_model_response(input_text)
    st.subheader("The Response is")
    st.write(response)
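
# --- Optional: multi-turn chat (a sketch, not wired into the UI above) ---
# DialoGPT is trained for multi-turn dialogue, so earlier turns can be carried
# forward by concatenating the previous output ids with the new user input.
# This helper is illustrative only: the session_state key "chat_history_ids"
# and the max_length of 500 are assumptions made for this sketch.
def get_model_response_with_history(question):
    new_input_ids = tokenizer.encode(question + tokenizer.eos_token, return_tensors='pt')
    history = st.session_state.get("chat_history_ids")
    # Prepend stored history so the model sees the whole conversation so far
    input_ids = torch.cat([history, new_input_ids], dim=-1) if history is not None else new_input_ids
    output = model.generate(
        input_ids,
        max_length=500,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Persist the full sequence (history + new exchange) for the next turn
    st.session_state["chat_history_ids"] = output
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)

# To launch the app locally: streamlit run <path-to-this-file>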