Omarlittel committed on
Commit 76df1a1 · verified · 1 Parent(s): c415dc9

Update app.py

Files changed (1): app.py +12 -6
app.py CHANGED
@@ -1,14 +1,14 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load the model and tokenizer from Hugging Face
+# Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2")
 model = AutoModelForCausalLM.from_pretrained("Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2")
 
 # Define a function to generate responses
 def generate_response(input_text):
-    # Add a system prompt for improved responses
-    system_prompt = "Think step by step with a logical reasoning and intellectual sense before you provide any response."
+    # Optional: Add a system prompt for improved responses
+    system_prompt = "Think step by step with logical reasoning and intellectual sense before you provide any response."
     input_text = system_prompt + "\n" + input_text
 
     inputs = tokenizer(input_text, return_tensors="pt")
@@ -16,6 +16,12 @@ def generate_response(input_text):
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return response
 
-# Set up the Gradio interface
-iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
+# Set up Gradio interface with a single input and output
+iface = gr.Interface(
+    fn=generate_response,
+    inputs=gr.Textbox(label="Input Text", placeholder="Type your message here..."),
+    outputs=gr.Textbox(label="Output"),
+    live=True
+)
+
 iface.launch()
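
For context, the hunks above skip the file's unchanged line 15 (the generation step between tokenizing and decoding). Below is a minimal sketch of how the updated generate_response likely runs end to end; the model.generate call and the max_new_tokens cap are assumptions for illustration, not part of this commit.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer (as in the commit)
tokenizer = AutoTokenizer.from_pretrained("Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2")
model = AutoModelForCausalLM.from_pretrained("Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2")

def generate_response(input_text):
    # Optional system prompt, as in the commit
    system_prompt = "Think step by step with logical reasoning and intellectual sense before you provide any response."
    input_text = system_prompt + "\n" + input_text

    inputs = tokenizer(input_text, return_tensors="pt")
    # Assumed generation step (not shown in the diff); max_new_tokens=256 is a placeholder
    outputs = model.generate(**inputs, max_new_tokens=256)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

One design note on the new interface: live=True makes Gradio call generate_response as the user types rather than only on submit, which is costly for an 8B causal LM; dropping live=True so the function runs only on button click is the usual alternative.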