bhaskartripathi and mikeee committed
Commit b061c53 (0 parents)

Duplicate from mikeee/gradio-chatinterface

Co-authored-by: mikeee <mikeee@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: gradio-interface-tryout
+ emoji: 🚀
+ colorFrom: yellow
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.39.0
+ app_file: app-org.py
+ duplicated_from: mikeee/gradio-chatinterface
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/examples_list.cpython-310.pyc ADDED
Binary file (2.92 kB).
 
app-org.py ADDED
@@ -0,0 +1,73 @@
+ """
+ Try out gradio.ChatInterface.
+
+ colab gradio-chatinterface.
+
+ %%writefile requirements.txt
+ gradio
+ transformers
+ sentencepiece
+ torch
+
+ """
+ # pylint: disable=line-too-long, missing-module-docstring, missing-function-docstring
+ # import torch
+ from time import time
+
+ import gradio as gr
+ from about_time import about_time
+ from examples_list import examples_list
+ from transformers import AutoModel, AutoTokenizer  # AutoModelForCausalLM,
+
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga2", use_fast=False)
+ # model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga2", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
+ # system_prompt = "### System:\nYou are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
+ # pipeline = pipeline(task="text-generation", model="meta-llama/Llama-2-7b")
+ tokenizer = AutoTokenizer.from_pretrained(
+     "THUDM/chatglm2-6b-int4", trust_remote_code=True
+ )
+ chat_model = AutoModel.from_pretrained(
+     "THUDM/chatglm2-6b-int4", trust_remote_code=True  # 3.92G
+ ).float()
+
+
+ def chat(message, history):
+     # prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"
+     # inputs = tokenizer(prompt, return_tensors="pt").to(device=device)
+     # output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
+     # return tokenizer.decode(output[0], skip_special_tokens=True)
+     first_chunk = True  # used to time the prelude (start -> first streamed chunk)
+     then = time()
+     prefix = ""
+     prelude = 0.0
+     with about_time() as dur:
+         for response, _ in chat_model.stream_chat(
+             tokenizer, message, history, max_length=2048, top_p=0.7, temperature=0.95
+         ):
+             if first_chunk:
+                 first_chunk = False
+                 prelude = time() - then
+                 prefix = f"{prelude:.2f}s"
+             yield f"{prefix} {response}"
+     suffix = f"\n(time elapsed: {dur.duration_human}, {(time() - then) / len(response):.2f}s/char)"  # was `time() - prelude`, which mixed an epoch timestamp with a duration
+     yield f"{response}{suffix}"
+
+ chatbot = gr.Chatbot([], label="Bot", height=450)
+ textbox = gr.Textbox("", scale=10, label="", lines=2, placeholder="Ask me anything")
+ submit_btn = gr.Button(value="▶️ Send", scale=1, min_width=0, variant="primary")
+
+ interf = gr.ChatInterface(
+     chat,
+     chatbot=chatbot,
+     textbox=textbox,
+     submit_btn=submit_btn,
+     title="gradio-chatinterface-tryout",
+     examples=examples_list,
+     theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),
+ ).queue(max_size=5)
+
+
+ if __name__ == "__main__":
+     interf.launch(debug=True)
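
Note: the streaming contract app-org.py relies on is worth spelling out. gr.ChatInterface accepts a function of (message, history) that may yield successive partial strings; each yield replaces the bot's in-progress reply in the UI. Below is a minimal self-contained sketch of that contract, with the chatglm call stubbed out by a hypothetical fake_stream so it runs without downloading model weights (gradio ~3.39 assumed, per the README):

import time

import gradio as gr


def fake_stream(message):
    """Yield progressively longer prefixes of a canned reply (stand-in for chat_model.stream_chat)."""
    reply = f"You said: {message}"
    for i in range(1, len(reply) + 1):
        time.sleep(0.02)
        yield reply[:i]


def chat(message, history):
    # Each yield replaces the bot's in-progress message in the UI.
    for partial in fake_stream(message):
        yield partial


demo = gr.ChatInterface(chat, title="streaming sketch").queue(max_size=5)

if __name__ == "__main__":
    demo.launch()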
app.py ADDED
@@ -0,0 +1,160 @@
+ """
+ Try out gradio.ChatInterface.
+
+ colab gradio-chatinterface.
+
+ %%writefile requirements.txt
+ gradio
+ transformers
+ sentencepiece
+ torch
+ cpm_kernels
+
+ import gradio as gr
+
+ def greet(name):
+     return "Hello " + name + "!"
+
+ with gr.Blocks() as demo:
+     name = gr.Textbox(label="Name")
+     output = gr.Textbox(label="Output Box")
+     greet_btn = gr.Button("Greet")
+     greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
+
+
+ demo.launch()
+
+ """
+ # pylint: disable=line-too-long, missing-module-docstring, missing-function-docstring
+ # import torch
+ import random
+ import time
+
+ import gradio as gr
+
+
+ def respond2(message, chat_history):
+     if chat_history is None:
+         chat_history = []
+     bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
+
+     temp = ""
+     chat_history.append((message, temp))
+     for elm in range(len(bot_message)):
+         temp = bot_message[: elm + 1]
+         time.sleep(0.2)
+         chat_history[-1] = message, temp
+         # yield message, chat_history
+         # chatbot.value = chat_history
+
+     chat_history[-1] = (message, "done " + bot_message)
+     time.sleep(2)
+
+     yield "", chat_history
+
+ def stream_chat():
+     """
+     List samples.
+
+     Sure [('test me', 'Sure')]
+     Sure, [('test me', 'Sure,')]
+     Sure, I [('test me', 'Sure, I')]
+     Sure, I' [('test me', "Sure, I'")]
+     Sure, I'd [('test me', "Sure, I'd")]
+     """
+     resp = ""
+     for elm in range(10):
+         resp += str(elm)
+         time.sleep(0.1)  # (was a redundant `from time import sleep` inside the loop)
+         yield resp
+
+
+ def chat(message="", history=None):
+     history = history or []  # avoid the mutable-default-argument pitfall
+     # prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"
+     # inputs = tokenizer(prompt, return_tensors="pt").to(device=device)
+     # output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
+     # return tokenizer.decode(output[0], skip_special_tokens=True)
+     _ = """
+     for response, _ in chat_model.stream_chat(
+         tokenizer, message, history, max_length=2048, top_p=0.7, temperature=0.95
+     ):
+         yield response
+     """
+     g = update_chatbot()
+     g.send(None)  # prime the consumer coroutine before the first real send
+
+     for response in stream_chat():
+         # yield response
+         g.send(response)
+         yield response
+
+     yield "done " + response
+
+
+ def update_chatbot():
+     while True:
+         message = yield
+         print(f"{message=}")
+
+
+ def greet(name):
+     return "Hello " + name + "!"
+
+ with gr.Blocks() as block:
+     name = gr.Textbox(label="Name")
+     output = gr.Textbox(label="Output Box")
+     greet_btn = gr.Button("Greet")
+     # greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
+
+     greet_btn.click(fn=chat, inputs=name, outputs=output, api_name="greet")
+
+ _ = """
+ with gr.Blocks(theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),) as block:
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox()
+
+ # gr.ChatInterface(
+ block(
+     chat,
+     [msg, chatbot],
+     [chatbot],
+     # title="gradio-chatinterface-tryout",
+     # examples=examples_list,
+ ).queue(max_size=2).launch()
+ # """
+
+ # block.queue(max_size=2).launch()
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox()
+     clear = gr.ClearButton([msg, chatbot])
+
+     def respond(message, chat_history):
+         bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
+         chat_history.append((message, bot_message))
+         time.sleep(2)
+         return "", chat_history
+
+     def respond1(message, chat_history):
+         if chat_history is None:
+             chat_history = []
+         bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
+
+         temp = ""
+         chat_history.append((message, temp))
+         for elm in range(len(bot_message)):
+             temp = bot_message[: elm + 1]
+             time.sleep(0.2)
+             chat_history[-1] = message, temp
+             yield message, chat_history
+
+         chat_history[-1] = (message, "done " + bot_message)
+         time.sleep(2)
+
+         yield "", chat_history
+
+     msg.submit(respond2, [msg, chatbot], [msg, chatbot])
+
+ # demo.queue(max_size=2).launch()
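
Note: chat() in app.py drives update_chatbot as a plain consumer coroutine, which is easy to misread. A generator that receives values through .send() must first be primed with send(None) to advance it to its first yield. A standalone sketch of the pattern, independent of gradio (the names consumer, c, and token are illustrative, not from the original file):

def consumer():
    # Execution pauses at `yield` until the caller invokes .send(...).
    while True:
        item = yield
        print(f"received: {item!r}")


c = consumer()
c.send(None)  # prime: run the body up to the first `yield`
for token in ("Sure", "Sure,", "Sure, I"):
    c.send(token)  # each send resumes the generator exactly once
c.close()  # raises GeneratorExit inside the coroutine to end it cleanly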
examples_list.py ADDED
@@ -0,0 +1,43 @@
+ etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+ examples_list = [
+     ["What NFL team won the Super Bowl in the year Justin Bieber was born?"],
+     [
+         "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
+     ],
+     ["How to pick a lock? Provide detailed steps."],
+     [
+         "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying, then how long will it take to dry a cloth?"
+     ],
+     [
+         "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying, then how long will it take to dry a cloth? Think step by step."
+     ],
+     ["is infinity + 1 bigger than infinity?"],
+     ["Explain the plot of Cinderella in a sentence."],
+     [
+         "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+     ],
+     ["What are some common mistakes to avoid when writing code?"],
+     ["Build a prompt to generate a beautiful portrait of a horse"],
+     ["Suggest four metaphors to describe the benefits of AI"],
+     ["Write a pop song about leaving home for the sandy beaches."],
+     ["Write a summary demonstrating my ability to tame lions"],
+     ["鲁迅和周树人什么关系? 说中文。"],
+     ["鲁迅和周树人什么关系?"],
+     ["鲁迅和周树人什么关系? 用英文回答。"],
+     ["从前有一头牛,这头牛后面有什么?"],
+     ["正无穷大加一大于正无穷大吗?"],
+     ["正无穷大加正无穷大大于正无穷大吗?"],
+     ["-2的平方根等于什么?"],
+     ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+     ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+     ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+     [f"Translate the following to Chinese. List 2 variants: \n{etext}"],
+     [f"{etext} 翻成中文,列出3个版本。"],
+     [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"],
+     ["假定 1 + 2 = 4, 试求 7 + 8。"],
+     ["给出判断一个数是不是质数的 javascript 码。"],
+     ["给出实现python 里 range(10)的 javascript 码。"],
+     ["给出实现python 里 [*range(10)]的 javascript 码。"],
+     ["Erkläre die Handlung von Cinderella in einem Satz."],
+     ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."],
+ ]
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ # gradio itself is not listed: the Spaces runtime provides it (see sdk_version in README.md)
+ transformers
+ sentencepiece
+ torch
+ cpm_kernels
+ # diffusers
+ # accelerate
+ # xformers
+ # fairscale
+ # fire
+ # protobuf
+ # mdtex2html
+
+ about-time
run-app.bat ADDED
@@ -0,0 +1 @@
+ nodemon -w app.py -x python app.py
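
Note: run-app.bat leans on nodemon, a Node.js file watcher, purely to restart app.py on save during local development. A rough Python-only equivalent, assuming the third-party watchdog package (not in this Space's requirements.txt), could look like this sketch:

import subprocess
import sys
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class RestartOnChange(FileSystemEventHandler):
    """Restart the script whenever the watched file is modified."""

    def __init__(self, script="app.py"):
        self.script = script
        self.proc = subprocess.Popen([sys.executable, script])

    def on_modified(self, event):
        # nodemon debounces rapid saves; this sketch deliberately does not.
        if event.src_path.endswith(self.script):
            self.proc.terminate()  # stop the old server
            self.proc = subprocess.Popen([sys.executable, self.script])


handler = RestartOnChange()
observer = Observer()
observer.schedule(handler, path=".", recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    handler.proc.terminate()
    observer.stop()
observer.join()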