minhdang committed on
Commit
75352cb
1 Parent(s): 600ae0b

Update app.py

Files changed (1)
app.py +130 -0
app.py CHANGED
@@ -0,0 +1,130 @@
+ import os
+ 
+ from threading import Thread
+ from typing import Iterator
+ 
+ import accelerate  # required for device_map="auto"
+ import bitsandbytes  # required for load_in_4bit=True
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ 
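+ # Generation limits (MAX_INPUT_TOKEN_LENGTH can be overridden via env var) and a request counter.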
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ total_count = 0
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+ 
+ 
+ DESCRIPTION = """CODE"""
+ 
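+ # Load deepseek-coder-33b-instruct in 4-bit so the 33B model fits in GPU memory.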
+ model_id = "deepseek-ai/deepseek-coder-33b-instruct"
+ model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.use_default_system_prompt = True
+ 
+ 
+ @spaces.GPU
+ def gen(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.0,
+ ) -> Iterator[str]:
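+     # Count requests served by this process and log GPU state for debugging.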
+     global total_count
+     total_count += 1
+     print(total_count)
+     os.system("nvidia-smi")
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
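+ 
+     # Apply the model's chat template; if the conversation is too long, keep only the most recent tokens.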
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
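+ 
+     # Run generation in a background thread and stream decoded text as it arrives.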
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         {"input_ids": input_ids},
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=False,  # greedy decoding; top_p/top_k have no effect while this is False
+         top_p=top_p,
+         top_k=top_k,
+         num_beams=1,
+         # temperature=temperature,
+         repetition_penalty=repetition_penalty,
+         eos_token_id=32021,  # deepseek-coder's <|EOT|> token, also stripped from the output below
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+ 
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs).replace("<|EOT|>", "")
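+ 
+ 
+ # Build the chat UI, exposing the decoding controls as extra inputs.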
+ chat_interface = gr.ChatInterface(
+     fn=gen,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         # gr.Slider(
+         #     label="Temperature",
+         #     minimum=0,
+         #     maximum=4.0,
+         #     step=0.1,
+         #     value=0,
+         # ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.0,
+         ),
+     ],
+     stop_btn=gr.Button("Stop"),
+     examples=[
+         ["implement snake game using pygame"],
+         ["Can you explain briefly to me what is the Python programming language?"],
+         ["write a program to find the factorial of a number"],
+     ],
+ )
+ 
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     chat_interface.render()
+ 
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()