DR-Rakshitha committed on
Commit
c928ad3
·
1 Parent(s): 26326a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -7
app.py CHANGED
@@ -1,15 +1,19 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
- # # Specify the directory containing the tokenizer's configuration file (config.json)
5
- model_name = "wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin"
 
6
 
7
- # # Initialize the GPT4All model
8
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
9
 
10
  def generate_text(input_text):
11
- text= model.generate(input_text)
12
- return text
 
 
13
 
14
  text_generation_interface = gr.Interface(
15
  fn=generate_text,
@@ -19,3 +23,7 @@ text_generation_interface = gr.Interface(
19
  outputs=gr.outputs.Textbox(label="Generated Text"),
20
  title="GPT-4 Text Generation",
21
  ).launch()
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
# Path to the local model checkpoint file.
# NOTE(review): the original line was
#   model_path = f"./{wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin}"
# which is a SyntaxError — inside an f-string the braces make the filename an
# expression, and `wizardlm-13b-v1.1...` is not valid Python. The filename is a
# literal string, not a variable, so no f-string is needed.
model_name = "gpt4all"  # NOTE(review): unused below — verify before removing
model_path = "./wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin"

# Initialize the model and tokenizer from the local checkpoint.
# NOTE(review): a `.ggmlv3.q4_0.bin` file is a llama.cpp/GGML quantized
# checkpoint; `AutoModelForCausalLM.from_pretrained` expects a Hugging Face
# format directory/repo and will likely fail on it — confirm the checkpoint
# format, or load it via a GGML-capable backend (e.g. ctransformers/gpt4all).
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
11
 
12
def generate_text(input_text):
    """Run the language model on *input_text* and return the decoded output.

    The prompt is tokenized to PyTorch tensors, extended by the model with
    its default generation settings, and the first generated sequence is
    decoded back to plain text with special tokens stripped.
    """
    # Encode the prompt into model-ready token ids.
    encoded = tokenizer(input_text, return_tensors="pt")
    # Produce a continuation; generate() returns a batch of sequences.
    output_ids = model.generate(encoded.input_ids)
    # Decode the single sequence in the batch back to a string.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
17
 
18
  text_generation_interface = gr.Interface(
19
  fn=generate_text,
 
23
  outputs=gr.outputs.Textbox(label="Generated Text"),
24
  title="GPT-4 Text Generation",
25
  ).launch()
26
+
27
+
28
+
29
+ # model_name = ""