Enrico Shippole committed
Commit 17d1f44
1 Parent(s): b54a00d

Add initial gradio setup

Files changed (1)
  1. app.py +9 -2
app.py CHANGED
@@ -6,7 +6,12 @@ import gradio as gr
 def generate(prompt, seq_len, temperature, filter_thres, model):
     device = torch.device("cpu")
 
-    model = torch.hub.load("conceptofmind/PaLM", "palm_1b_8k_v0", map_location=device).to(device).eval()
+    model = PaLM(
+        num_tokens=50304, dim=1024, depth=24, dim_head=128, heads=8, flash_attn=False, qk_rmsnorm = False,
+    ).to(device).eval()
+
+    checkpoint = torch.load('./palm_410m_8k_v0.pt', map_location=device)
+    model.load_state_dict(checkpoint)
 
     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
 
@@ -25,7 +30,9 @@ def generate(prompt, seq_len, temperature, filter_thres, model):
 
     decoded_output = tokenizer.batch_decode(output_tensor, skip_special_tokens=True)
 
-    return decoded_output
+    return decoded_output[0]
+
+
 
 iface = gr.Interface(
     fn=generate,
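
The second hunk cuts off before the gr.Interface arguments, so the input and output components used by this Space are not visible in the commit. A minimal sketch of how the five parameters of generate() could be wired into a Gradio interface is shown below; the component types, ranges, labels, and the launch() call are illustrative assumptions, not code from this commit.

# Hypothetical completion of the truncated gr.Interface(...) call above.
# Component choices, ranges, and default values are illustrative only.
iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),                                     # prompt
        gr.Slider(1, 256, value=64, step=1, label="Sequence length"),   # seq_len
        gr.Slider(0.1, 1.5, value=0.8, label="Temperature"),            # temperature
        gr.Slider(0.0, 1.0, value=0.9, label="Filter threshold"),       # filter_thres
        gr.Textbox(value="palm_410m_8k_v0", label="Model"),             # model (overwritten inside generate)
    ],
    outputs=gr.Textbox(label="Generated text"),
)

iface.launch()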