geekyrakshit committed
Commit a120af4 · verified · 1 Parent(s): 20aadca

Update app.py

Files changed (1)
  1. app.py +48 -3
app.py CHANGED
@@ -1,7 +1,52 @@
+import spaces
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 
-def greet(name):
-    return "Hello " + name + "!!"
+model_id = "PhysicsWallahAI/Aryabhata-1.0"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
+stop_strings = [
+    "<|im_end|>",
+    "<|end|>",
+    "<im_start|>",
+    "```python\n",
+    "<|im_start|>",
+    "]}}]}}]",
+]
+
+
+def strip_bad_tokens(s, stop_strings):
+    for suffix in stop_strings:
+        if s.endswith(suffix):
+            return s[: -len(suffix)]
+    return s
+
+
+generation_config = GenerationConfig(max_new_tokens=4096, stop_strings=stop_strings)
+
+
+@spaces.GPU
+def greet(prompt: str):
+    messages = [
+        {
+            "role": "system",
+            "content": "Think step-by-step; put only the final answer inside \\boxed{}.",
+        },
+        {"role": "user", "content": prompt},
+    ]
+    text = tokenizer.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=True
+    )
+    inputs = tokenizer([text], return_tensors="pt")
+    outputs = model.generate(
+        **inputs, generation_config=generation_config, tokenizer=tokenizer
+    )
+    return strip_bad_tokens(
+        tokenizer.decode(outputs[0], skip_special_tokens=True), stop_strings
+    )
+
+
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo = gr.Interface(fn=greet, inputs="text", outputs="text", title="Aryabhatta Demo")
 demo.launch()
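
For a quick check of the updated app, a minimal client-side sketch follows. It is not part of the commit: it assumes the Space is running and reachable through gradio_client, the Space id below is an assumption based on the model id in this commit, and the sample prompt is illustrative.

# Minimal sketch: call the Interface's default /predict endpoint via gradio_client.
# The Space id is an assumption taken from the model id in this commit.
from gradio_client import Client

client = Client("PhysicsWallahAI/Aryabhata-1.0")  # assumed Space id
result = client.predict("Solve 2x + 3 = 11 for x.", api_name="/predict")
print(result)  # the final answer should appear inside \boxed{}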