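"""Gradio demo for Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2.

Loads the model with transformers and exposes a single text-in/text-out
generate_response function through a Gradio Interface.
"""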
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load the model and tokenizer.
# torch_dtype="auto" loads the checkpoint in its native precision instead of
# float32, roughly halving memory use; this assumes the host has enough
# RAM/VRAM for an 8B-parameter model either way.
tokenizer = AutoTokenizer.from_pretrained("Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2")
model = AutoModelForCausalLM.from_pretrained(
    "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2",
    torch_dtype="auto",
)
# Generate a response for a single user message
def generate_response(input_text):
    system_prompt = "Think step by step with logical reasoning and intellectual sense before you provide any response."
    prompt = system_prompt + "\n" + input_text
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds only the generated text; the original max_length=150
    # also counted the prompt tokens, so long inputs could leave no room to reply
    outputs = model.generate(**inputs, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response
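# Note: Lexi is an instruct-tuned Llama 3.1 variant, so a chat-template prompt
# (a sketch, assuming the checkpoint ships Llama's chat template) will usually
# give better-formed replies than plain string concatenation:
#
#   messages = [
#       {"role": "system", "content": system_prompt},
#       {"role": "user", "content": input_text},
#   ]
#   input_ids = tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_tensors="pt"
#   ).to(model.device)
#   outputs = model.generate(input_ids, max_new_tokens=150)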
# Set up Gradio interface
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
iface.launch()
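# On Hugging Face Spaces, app.py is executed directly and launch() serves the
# UI; run locally, it binds to http://127.0.0.1:7860 by default.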