nroggendorff committed on
Commit
d07345c
·
verified ·
1 Parent(s): a69536a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -1,14 +1,12 @@
1
  import gradio as gr
2
  import os
3
- import spaces
4
  import torch
5
- from transformers import AutoTokenizer, AutoModelForChatGPT
6
 
7
  model_path = "cognitivecomputations/dolphin-2.7-mixtral-8x7b"
8
  tokenizer = AutoTokenizer.from_pretrained(model_path)
9
- model = AutoModelForChatGPT.from_pretrained(model_path)
10
 
11
- @spaces.GPU
12
  def chat(prompt):
13
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
14
  output = model.generate(input_ids, max_length=1024, num_return_sequences=1, top_p=0.9, top_k=50, num_beams=2, early_stopping=True)
@@ -17,7 +15,7 @@ def chat(prompt):
17
 
18
  demo = gr.Interface(
19
  fn=chat,
20
- inputs=gr.Textbox(value="Hello!", lines=5),
21
  outputs=gr.Textbox(label="Bot's Response", lines=5)
22
  )
23
 
 
1
  import gradio as gr
2
  import os
 
3
  import torch
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
# Load the Dolphin 2.7 Mixtral-8x7B checkpoint from the Hugging Face hub.
# The same repo id is used for both the tokenizer and the causal-LM weights.
model_path = "cognitivecomputations/dolphin-2.7-mixtral-8x7b"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)
9
 
 
10
  def chat(prompt):
11
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
12
  output = model.generate(input_ids, max_length=1024, num_return_sequences=1, top_p=0.9, top_k=50, num_beams=2, early_stopping=True)
 
15
 
16
# Simple single-turn chat UI: one free-form text input wired straight to
# `chat`, with the model's reply shown in a read-only response box.
demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(placeholder="Enter your message here", lines=5),
    outputs=gr.Textbox(label="Bot's Response", lines=5),
)
21