import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "riskyhomo/Ayn_Rand_BB"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Function to generate responses using the model
def generate_response(user_input):
    # Tokenize the user input
    inputs = tokenizer(user_input, return_tensors="pt")

    # Generate a response from the model
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=500,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens; decoder-only models echo
    # the prompt at the start of the output, so slice it off
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True
    )
    return response


# Set up the Gradio interface
iface = gr.Interface(
    fn=generate_response,  # Function to generate responses
    inputs="text",         # Input type is text
    outputs="text",        # Output type is text
    title="Chatbot",       # Title of the interface
    description="A chatbot trained with a language model.",
    theme="default",       # Built-in Gradio theme
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
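
# --- Optional: querying the running app programmatically ---
# A minimal sketch, kept commented out because iface.launch() blocks the
# script. It assumes the app is serving on Gradio's default local URL
# (http://127.0.0.1:7860), that the `gradio_client` package is installed,
# and uses "/predict", the default endpoint name for a single gr.Interface.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")
# reply = client.predict("What is the meaning of life?", api_name="/predict")
# print(reply)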