# app_functions.py
from transformers import AutoModelForCausalLM, AutoTokenizer


def _generate_response(model_name, input_text, no_words, user_type):
    """Load the given causal-LM checkpoint and generate a bounded-length response."""
    try:
        # Note: loading the tokenizer/model on every call is slow; consider
        # caching them at module level in production.
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)

        prompt = (
            f"Give an answer for {user_type} of the text given that is "
            f"'{input_text}' within {no_words} words."
        )
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=int(no_words),
            do_sample=True,
            top_k=50,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2-family models define no pad token
        )
        # Decode only the newly generated tokens, not the echoed prompt.
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)
    except Exception as e:
        return f"Error during {model_name} model execution: {str(e)}"


def Get_DialoGPT_Response(input_text, no_words, user_type):
    return _generate_response("Rabbiaaa/DialoGPT", input_text, no_words, user_type)


def Get_DistilGPT_Response(input_text, no_words, user_type):
    return _generate_response("Rabbiaaa/DistilGPT", input_text, no_words, user_type)


def Get_MedGPT_Response(input_text, no_words, user_type):
    return _generate_response("Rabbiaaa/MedGPT", input_text, no_words, user_type)
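
# Minimal usage sketch (assumption: these helpers are normally called from an
# app front end; the sample text, word count, and user_type value below are
# illustrative only and not taken from the original code).
if __name__ == "__main__":
    sample_text = "Diabetes is a chronic condition that affects blood sugar levels."
    print(Get_MedGPT_Response(sample_text, no_words=50, user_type="Doctor"))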